Commit 6b0ef279 authored by Ard Biesheuvel, committed by Russell King (Oracle)

ARM: 9384/2: mm: Make tlbflush routines CFI safe

Instead of avoiding CFI entirely on the TLB flush helpers, reorganize
the code so that the CFI machinery can deal with it. The important
things to take into account are:
- functions in asm called indirectly from C need to be defined using
  SYM_TYPED_FUNC_START()
- a reference to the asm function needs to be visible to the compiler,
  in order to get it to emit the typeid symbol.

The latter means that defining the cpu_tlb_fns structs is best done from
C code, so that the references in the static initializers will be
visible to the compiler.
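
In short, the pattern looks roughly like this (a minimal sketch using a
hypothetical "foo" TLB type; the real conversions follow in the diff):

	@ foo-tlb.S: define the helper with SYM_TYPED_FUNC_START so the
	@ KCFI type hash is emitted in front of the entry point
	SYM_TYPED_FUNC_START(foo_flush_kern_tlb_range)
		mcr	p15, 0, r0, c8, c7, 0	@ hypothetical flush body
		ret	lr
	SYM_FUNC_END(foo_flush_kern_tlb_range)

	/* foo-tlb.c: taking the function's address in a static initializer
	 * makes the compiler emit the typeid symbol that the
	 * SYM_TYPED_FUNC_START() expansion references */
	void foo_flush_kern_tlb_range(unsigned long, unsigned long);

	struct cpu_tlb_fns foo_tlb_fns __initconst = {
		.flush_kern_range = foo_flush_kern_tlb_range,
	};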
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
parent 4cece764
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_CPU_TLB_FEROCEON)	+= tlb-v4wbi.o	# reuse v4wbi TLB functions
 obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o
 obj-$(CONFIG_CPU_TLB_V7)	+= tlb-v7.o
 obj-$(CONFIG_CPU_TLB_FA)	+= tlb-fa.o
+obj-y				+= tlb.o
 
 obj-$(CONFIG_CPU_ARM7TDMI)	+= proc-arm7tdmi.o
 obj-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -338,21 +338,6 @@ ENTRY(\name\()_cache_fns)
 	.size	\name\()_cache_fns, . - \name\()_cache_fns
 .endm
 
-.macro define_tlb_functions name:req, flags_up:req, flags_smp
-	.type	\name\()_tlb_fns, #object
-	.align 2
-ENTRY(\name\()_tlb_fns)
-	.long	\name\()_flush_user_tlb_range
-	.long	\name\()_flush_kern_tlb_range
-	.ifnb \flags_smp
-		ALT_SMP(.long	\flags_smp )
-		ALT_UP(.long	\flags_up )
-	.else
-		.long	\flags_up
-	.endif
-	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
-.endm
-
 .macro globl_equ x, y
 	.globl	\x
 	.equ	\x, \y
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -15,6 +15,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -31,7 +32,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	4
-ENTRY(fa_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -46,9 +47,10 @@ ENTRY(fa_flush_user_tlb_range)
 	blo	1b
 	mcr	p15, 0, r3, c7, c10, 4		@ data write barrier
 	ret	lr
+SYM_FUNC_END(fa_flush_user_tlb_range)
 
-ENTRY(fa_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_kern_tlb_range)
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	bic	r0, r0, #0x0ff
@@ -60,8 +62,4 @@ ENTRY(fa_flush_kern_tlb_range)
 	mcr	p15, 0, r3, c7, c10, 4		@ data write barrier
 	mcr	p15, 0, r3, c7, c5, 4		@ prefetch flush (isb)
 	ret	lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions fa, fa_tlb_flags
+SYM_FUNC_END(fa_flush_kern_tlb_range)
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	5
-ENTRY(v4_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -40,6 +41,7 @@ ENTRY(v4_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4_flush_user_tlb_range)
 
 /*
  *	v4_flush_kern_tlb_range(start, end)
@@ -50,10 +52,11 @@ ENTRY(v4_flush_user_tlb_range)
  *	- start - virtual address (may not be aligned)
  *	- end   - virtual address (may not be aligned)
  */
+#ifdef CONFIG_CFI_CLANG
+SYM_TYPED_FUNC_START(v4_flush_kern_tlb_range)
+	b	.v4_flush_kern_tlb_range
+SYM_FUNC_END(v4_flush_kern_tlb_range)
+#else
 .globl v4_flush_kern_tlb_range
 .equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
+#endif
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v4, v4_tlb_flags
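(Note: the CONFIG_CFI_CLANG trampoline above exists because
v4_flush_kern_tlb_range is otherwise only a .equ alias for the local
.v4_flush_kern_tlb_range implementation, and an alias cannot carry its own
KCFI type hash; under CFI a real typed function that simply branches to the
local symbol takes its place.)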
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	5
-ENTRY(v4wb_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -43,6 +44,7 @@ ENTRY(v4wb_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4wb_flush_user_tlb_range)
 
 /*
  *	v4_flush_kern_tlb_range(start, end)
@@ -53,7 +55,7 @@ ENTRY(v4wb_flush_user_tlb_range)
  *	- start - virtual address (may not be aligned)
  *	- end   - virtual address (may not be aligned)
  */
-ENTRY(v4wb_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_tlb_range)
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	bic	r0, r0, #0x0ff
@@ -64,8 +66,4 @@ ENTRY(v4wb_flush_kern_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v4wb, v4wb_tlb_flags
+SYM_FUNC_END(v4wb_flush_kern_tlb_range)
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -26,7 +27,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	5
-ENTRY(v4wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -43,8 +44,9 @@ ENTRY(v4wbi_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4wbi_flush_user_tlb_range)
 
-ENTRY(v4wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_kern_tlb_range)
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	bic	r0, r0, #0x0ff
@@ -55,8 +57,4 @@ ENTRY(v4wbi_flush_kern_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v4wbi, v4wbi_tlb_flags
+SYM_FUNC_END(v4wbi_flush_kern_tlb_range)
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
@@ -32,7 +33,7 @@
  *	- the "Invalidate single entry" instruction will invalidate
  *	  both the I and the D TLBs on Harvard-style TLBs
  */
-ENTRY(v6wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mov	ip, #0
 	mmid	r3, r3				@ get vm_mm->context.id
@@ -56,6 +57,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
 	ret	lr
+SYM_FUNC_END(v6wbi_flush_user_tlb_range)
 
 /*
  *	v6wbi_flush_kern_tlb_range(start,end)
@@ -65,7 +67,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
  *	- start - start address (may not be aligned)
  *	- end   - end address (exclusive, may not be aligned)
  */
-ENTRY(v6wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
@@ -85,8 +87,4 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
 	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush (isb)
 	ret	lr
-
-	__INIT
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v6wbi, v6wbi_tlb_flags
+SYM_FUNC_END(v6wbi_flush_kern_tlb_range)
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -10,6 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
@@ -31,7 +32,7 @@
  *	- the "Invalidate single entry" instruction will invalidate
  *	  both the I and the D TLBs on Harvard-style TLBs
  */
-ENTRY(v7wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mmid	r3, r3				@ get vm_mm->context.id
 	dsb	ish
@@ -57,7 +58,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	blo	1b
 	dsb	ish
 	ret	lr
-ENDPROC(v7wbi_flush_user_tlb_range)
+SYM_FUNC_END(v7wbi_flush_user_tlb_range)
 
 /*
  *	v7wbi_flush_kern_tlb_range(start,end)
@@ -67,7 +68,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
  *	- start - start address (may not be aligned)
  *	- end   - end address (exclusive, may not be aligned)
  */
-ENTRY(v7wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range)
 	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
@@ -86,9 +87,4 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	dsb	ish
 	isb
 	ret	lr
-ENDPROC(v7wbi_flush_kern_tlb_range)
-
-	__INIT
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
+SYM_FUNC_END(v7wbi_flush_kern_tlb_range)
--- /dev/null
+++ b/arch/arm/mm/tlb.c (new file)
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/types.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_TLB_V4WT
+void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4_tlb_fns __initconst = {
+	.flush_user_range	= v4_flush_user_tlb_range,
+	.flush_kern_range	= v4_flush_kern_tlb_range,
+	.tlb_flags		= v4_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V4WB
+void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4wb_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4wb_tlb_fns __initconst = {
+	.flush_user_range	= v4wb_flush_user_tlb_range,
+	.flush_kern_range	= v4wb_flush_kern_tlb_range,
+	.tlb_flags		= v4wb_tlb_flags,
+};
+#endif
+
+#if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON)
+void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4wbi_tlb_fns __initconst = {
+	.flush_user_range	= v4wbi_flush_user_tlb_range,
+	.flush_kern_range	= v4wbi_flush_kern_tlb_range,
+	.tlb_flags		= v4wbi_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V6
+void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v6wbi_tlb_fns __initconst = {
+	.flush_user_range	= v6wbi_flush_user_tlb_range,
+	.flush_kern_range	= v6wbi_flush_kern_tlb_range,
+	.tlb_flags		= v6wbi_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V7
+void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {
+	.flush_user_range	= v7wbi_flush_user_tlb_range,
+	.flush_kern_range	= v7wbi_flush_kern_tlb_range,
+	.tlb_flags		= IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp
+						 : v7wbi_tlb_flags_up,
+};
+
+#ifdef CONFIG_SMP_ON_UP
+/* This will be run-time patched so the offset better be right */
+static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8);
+
+asm("	.pushsection	\".alt.smp.init\", \"a\"	\n" \
+    "	.align		2				\n" \
+    "	.long		v7wbi_tlb_fns + 8 - .		\n" \
+    "	.long		" __stringify(v7wbi_tlb_flags_up) "	\n" \
+    "	.popsection					\n");
+#endif
+#endif
+
+#ifdef CONFIG_CPU_TLB_FA
+void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void fa_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns fa_tlb_fns __initconst = {
+	.flush_user_range	= fa_flush_user_tlb_range,
+	.flush_kern_range	= fa_flush_kern_tlb_range,
+	.tlb_flags		= fa_tlb_flags,
+};
+#endif
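
For context, on multi-TLB kernels these structs are consumed through an
indirect call via the global cpu_tlb, which is exactly the call path that
KCFI instruments; a rough sketch, assuming the MULTI_TLB definitions in
<asm/tlbflush.h>:

	/* declared in <asm/tlbflush.h>; populated once at boot,
	 * roughly cpu_tlb = *list->tlb in setup_processor() */
	extern struct cpu_tlb_fns cpu_tlb;

	/* every range flush is an indirect call, checked by KCFI against
	 * the type hash emitted by SYM_TYPED_FUNC_START above */
	cpu_tlb.flush_kern_range(start, end);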