Commit 8ef8f360 authored by Dave Martin, committed by Catalin Marinas

arm64: Basic Branch Target Identification support

This patch adds the bare minimum required to expose the ARMv8.5
Branch Target Identification feature to userspace.

By itself, this does _not_ automatically enable BTI for any initial
executable pages mapped by execve().  This will come later, but for
now it should be possible to enable BTI manually on those pages by
using mprotect() from within the target process.

Other arches using the generic mman.h already use 0x10 for
arch-specific prot flags, so we use that value for PROT_BTI here.
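As an illustration (hypothetical userspace code, not part of this
patch; the page contents are a placeholder), opting an executable
region into BTI enforcement could look like:

	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef PROT_BTI
	#define PROT_BTI 0x10		/* arm64 value chosen by this patch */
	#endif

	int main(void)
	{
		/* Stand-in for a JIT code buffer: */
		void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (page == MAP_FAILED)
			return 1;

		/* ...emit BTI-annotated code into the page here... */

		/* Flip the page to executable and BTI guarded: */
		if (mprotect(page, 4096, PROT_READ | PROT_EXEC | PROT_BTI))
			perror("mprotect(PROT_BTI)");
		return 0;
	}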

For consistency, signal handler entry points in BTI guarded pages
are required to be annotated as such, just like any other function.
This blocks a relatively minor attack vector, but conforming
userspace will have the annotations anyway, so we may as well
enforce them.
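For example (illustrative only; -mbranch-protection is a standard
GCC/Clang option, not something added by this patch), a handler built
with branch protection gets the required annotation automatically,
since the compiler emits a BTI C landing pad at every function entry:

	/* Build with: cc -mbranch-protection=standard demo.c */
	#include <signal.h>
	#include <unistd.h>

	static void handler(int sig)
	{
		/* Entry starts with "bti c" (hint #34), so the BLR-like
		 * branch simulated on signal delivery lands safely. */
		write(STDOUT_FILENO, "caught\n", 7);
	}

	int main(void)
	{
		signal(SIGUSR1, handler);
		raise(SIGUSR1);
		return 0;
	}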
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 00e19cee
Documentation/arm64/cpu-feature-registers.rst:
@@ -176,6 +176,8 @@ infrastructure:
      +------------------------------+---------+---------+
      | SSBS                         | [7-4]   |    y    |
      +------------------------------+---------+---------+
+     | BT                           | [3-0]   |    y    |
+     +------------------------------+---------+---------+
 
 
 4) MIDR_EL1 - Main ID Register
Documentation/arm64/elf_hwcaps.rst:
@@ -236,6 +236,11 @@ HWCAP2_RNG
 
     Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
 
+HWCAP2_BTI
+
+    Functionality implied by ID_AA64PFR1_EL1.BT == 0b0001.
+
 4. Unused AT_HWCAP bits
 -----------------------
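A minimal userspace probe for the new hwcap might look like this
(illustrative sketch, not part of the patch):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_BTI
	#define HWCAP2_BTI	(1 << 17)	/* value defined by this patch */
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		printf("BTI: %ssupported\n",
		       (hwcap2 & HWCAP2_BTI) ? "" : "not ");
		return 0;
	}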
arch/arm64/include/asm/cpucaps.h:
@@ -58,7 +58,8 @@
 #define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE	48
 #define ARM64_HAS_E0PD				49
 #define ARM64_HAS_RNG				50
+#define ARM64_BTI				51
 
-#define ARM64_NCAPS				51
+#define ARM64_NCAPS				52
 
 #endif /* __ASM_CPUCAPS_H */
arch/arm64/include/asm/cpufeature.h:
@@ -613,6 +613,12 @@ static inline bool system_has_prio_mask_debugging(void)
 	       system_uses_irq_prio_masking();
 }
 
+static inline bool system_supports_bti(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_BTI) &&
+		cpus_have_const_cap(ARM64_BTI);
+}
+
 static inline bool system_capabilities_finalized(void)
 {
 	return static_branch_likely(&arm64_const_caps_ready);
arch/arm64/include/asm/esr.h:
@@ -22,7 +22,7 @@
 #define ESR_ELx_EC_PAC		(0x09)	/* EL2 and above */
 /* Unallocated EC: 0x0A - 0x0B */
 #define ESR_ELx_EC_CP14_64	(0x0C)
-/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_BTI		(0x0D)
 #define ESR_ELx_EC_ILL		(0x0E)
 /* Unallocated EC: 0x0F - 0x10 */
 #define ESR_ELx_EC_SVC32	(0x11)
arch/arm64/include/asm/exception.h:
@@ -34,6 +34,7 @@ static inline u32 disr_to_esr(u64 disr)
 asmlinkage void enter_from_user_mode(void);
 void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
+void do_bti(struct pt_regs *regs);
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
			struct pt_regs *regs);
arch/arm64/include/asm/hwcap.h:
@@ -94,6 +94,7 @@
 #define KERNEL_HWCAP_BF16		__khwcap2_feature(BF16)
 #define KERNEL_HWCAP_DGH		__khwcap2_feature(DGH)
 #define KERNEL_HWCAP_RNG		__khwcap2_feature(RNG)
+#define KERNEL_HWCAP_BTI		__khwcap2_feature(BTI)
 
 /*
  * This yields a mask that user programs can use to figure out what
arch/arm64/include/asm/mman.h (new file):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MMAN_H__
#define __ASM_MMAN_H__

#include <linux/compiler.h>
#include <linux/types.h>
#include <uapi/asm/mman.h>

static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
	unsigned long pkey __always_unused)
{
	if (system_supports_bti() && (prot & PROT_BTI))
		return VM_ARM64_BTI;

	return 0;
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
	return (vm_flags & VM_ARM64_BTI) ? __pgprot(PTE_GP) : __pgprot(0);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)

static inline bool arch_validate_prot(unsigned long prot,
	unsigned long addr __always_unused)
{
	unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;

	if (system_supports_bti())
		supported |= PROT_BTI;

	return (prot & ~supported) == 0;
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)

#endif /* ! __ASM_MMAN_H__ */
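These three hooks plumb the new flag through the generic mm code:
arch_calc_vm_prot_bits() turns PROT_BTI into VM_ARM64_BTI at
mmap()/mprotect() time, arch_vm_get_page_prot() translates that VM
flag into the PTE_GP hardware bit, and arch_validate_prot() only
accepts PROT_BTI when the system supports BTI. A consequence worth
noting (sketch below, hypothetical and assuming PROT_BTI as defined
above) is that userspace can apply the flag opportunistically, since
kernels or CPUs without BTI reject it with EINVAL:

	#include <errno.h>
	#include <sys/mman.h>

	static int protect_exec_maybe_bti(void *page, size_t len)
	{
		if (!mprotect(page, len, PROT_READ | PROT_EXEC | PROT_BTI))
			return 0;
		if (errno != EINVAL)
			return -1;
		/* No BTI support here: fall back to a plain mapping. */
		return mprotect(page, len, PROT_READ | PROT_EXEC);
	}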
arch/arm64/include/asm/pgtable-hwdef.h:
@@ -151,6 +151,7 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_GP			(_AT(pteval_t, 1) << 50)	/* BTI guarded */
 #define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
 #define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
arch/arm64/include/asm/pgtable.h:
@@ -660,7 +660,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP;
 
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
 		pte = pte_mkdirty(pte);
arch/arm64/include/asm/ptrace.h:
@@ -35,6 +35,7 @@
 #define GIC_PRIO_PSR_I_SET		(1 << 4)
 
 /* Additional SPSR bits not exposed in the UABI */
 #define PSR_IL_BIT		(1 << 20)
 
 /* AArch32-specific ptrace requests */
arch/arm64/include/asm/sysreg.h:
@@ -514,6 +514,8 @@
 #endif
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_BT1		(BIT(36))
+#define SCTLR_EL1_BT0		(BIT(35))
 #define SCTLR_EL1_UCI		(BIT(26))
 #define SCTLR_EL1_E0E		(BIT(24))
 #define SCTLR_EL1_SPAN		(BIT(23))
@@ -620,10 +622,12 @@
 /* id_aa64pfr1 */
 #define ID_AA64PFR1_SSBS_SHIFT		4
+#define ID_AA64PFR1_BT_SHIFT		0
 
 #define ID_AA64PFR1_SSBS_PSTATE_NI	0
 #define ID_AA64PFR1_SSBS_PSTATE_ONLY	1
 #define ID_AA64PFR1_SSBS_PSTATE_INSNS	2
+#define ID_AA64PFR1_BT_BTI		0x1
 
 /* id_aa64zfr0 */
 #define ID_AA64ZFR0_F64MM_SHIFT		56
arch/arm64/include/uapi/asm/hwcap.h:
@@ -73,5 +73,6 @@
 #define HWCAP2_BF16		(1 << 14)
 #define HWCAP2_DGH		(1 << 15)
 #define HWCAP2_RNG		(1 << 16)
+#define HWCAP2_BTI		(1 << 17)
 
 #endif /* _UAPI__ASM_HWCAP_H */
arch/arm64/include/uapi/asm/mman.h (new file):
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_MMAN_H
#define _UAPI__ASM_MMAN_H

#include <asm-generic/mman.h>

#define PROT_BTI	0x10		/* BTI guarded page */

#endif /* ! _UAPI__ASM_MMAN_H */
arch/arm64/include/uapi/asm/ptrace.h:
@@ -46,6 +47,7 @@
 #define PSR_I_BIT	0x00000080
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
+#define PSR_BTYPE_MASK	0x00000c00
 #define PSR_SSBS_BIT	0x00001000
 #define PSR_PAN_BIT	0x00400000
 #define PSR_UAO_BIT	0x00800000
@@ -55,6 +56,8 @@
 #define PSR_Z_BIT	0x40000000
 #define PSR_N_BIT	0x80000000
 
+#define PSR_BTYPE_SHIFT	10
+
 /*
  * Groups of PSR bits
  */
@@ -63,6 +66,12 @@
 #define PSR_x		0x0000ff00	/* Extension */
 #define PSR_c		0x000000ff	/* Control */
 
+/* Convenience names for the values of PSTATE.BTYPE */
+#define PSR_BTYPE_NONE	(0b00 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_JC	(0b01 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_C	(0b10 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_J	(0b11 << PSR_BTYPE_SHIFT)
+
 /* syscall emulation path in ptrace */
 #define PTRACE_SYSEMU		31
 #define PTRACE_SYSEMU_SINGLESTEP 32
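For reference (Armv8.5 BTI semantics summarized here, not text from
the patch): BLR with any register sets BTYPE to 0b10, BR via X16/X17
sets 0b01, BR via any other register sets 0b11, and 0b00 means
execution did not arrive via an indirect branch. The suffixes name
the landing pads that accept each value, so PSR_BTYPE_JC is accepted
by both "bti j" and "bti c"; the signal code below uses PSR_BTYPE_C
to mimic a BLR into the handler.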
arch/arm64/kernel/cpufeature.c:
@@ -179,6 +179,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -1347,6 +1349,21 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 }
 #endif
 
+#ifdef CONFIG_ARM64_BTI
+static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+{
+	/*
+	 * Use of X16/X17 for tail-calls and trampolines that jump to
+	 * function entry points using BR is a requirement for
+	 * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
+	 * So, be strict and forbid other BRs using other registers to
+	 * jump onto a PACIxSP instruction:
+	 */
+	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
+	isb();
+}
+#endif /* CONFIG_ARM64_BTI */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -1671,6 +1688,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
 	},
+#endif
+#ifdef CONFIG_ARM64_BTI
+	{
+		.desc = "Branch Target Identification",
+		.capability = ARM64_BTI,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.cpu_enable = bti_enable,
+		.sys_reg = SYS_ID_AA64PFR1_EL1,
+		.field_pos = ID_AA64PFR1_BT_SHIFT,
+		.min_field_value = ID_AA64PFR1_BT_BTI,
+		.sign = FTR_UNSIGNED,
+	},
 #endif
 	{},
 };
@@ -1781,6 +1811,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
 #endif
 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+#ifdef CONFIG_ARM64_BTI
+	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
+#endif
 #ifdef CONFIG_ARM64_PTR_AUTH
 	HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
 	HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
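Note on bti_enable() above (a summary of the architectural behaviour,
not text from the patch): SCTLR_EL1.BT0 and SCTLR_EL1.BT1 control
whether PACIASP/PACIBSP may serve as a landing pad for BTYPE 0b11
branches at EL0 and EL1 respectively; setting both means a PACIxSP
prologue only accepts the branch types a real "bti c" would accept.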
arch/arm64/kernel/cpuinfo.c:
@@ -92,6 +92,7 @@ static const char *const hwcap_str[] = {
 	"bf16",
 	"dgh",
 	"rng",
+	"bti",
 	NULL
 };
arch/arm64/kernel/entry-common.c:
@@ -188,6 +188,14 @@ static void notrace el0_undef(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(el0_undef);
 
+static void notrace el0_bti(struct pt_regs *regs)
+{
+	user_exit_irqoff();
+	local_daif_restore(DAIF_PROCCTX);
+	do_bti(regs);
+}
+NOKPROBE_SYMBOL(el0_bti);
+
 static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
 {
 	user_exit_irqoff();
@@ -255,6 +263,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_UNKNOWN:
 		el0_undef(regs);
 		break;
+	case ESR_ELx_EC_BTI:
+		el0_bti(regs);
+		break;
 	case ESR_ELx_EC_BREAKPT_LOW:
 	case ESR_ELx_EC_SOFTSTP_LOW:
 	case ESR_ELx_EC_WATCHPT_LOW:
arch/arm64/kernel/ptrace.c:
@@ -1874,7 +1874,7 @@ void syscall_trace_exit(struct pt_regs *regs)
  */
 #define SPSR_EL1_AARCH64_RES0_BITS \
 	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
-	 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
+	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
 
 #define SPSR_EL1_AARCH32_RES0_BITS \
 	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
arch/arm64/kernel/signal.c:
@@ -732,6 +732,22 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	regs->regs[29] = (unsigned long)&user->next_frame->fp;
 	regs->pc = (unsigned long)ka->sa.sa_handler;
 
+	/*
+	 * Signal delivery is a (wacky) indirect function call in
+	 * userspace, so simulate the same setting of BTYPE as a BLR
+	 * <register containing the signal handler entry point>.
+	 *
+	 * Signal delivery to a location in a PROT_BTI guarded page
+	 * that is not a function entry point will now trigger a
+	 * SIGILL in userspace.
+	 *
+	 * If the signal handler entry point is not in a PROT_BTI
+	 * guarded page, this is harmless.
+	 */
+	if (system_supports_bti()) {
+		regs->pstate &= ~PSR_BTYPE_MASK;
+		regs->pstate |= PSR_BTYPE_C;
+	}
+
 	if (ka->sa.sa_flags & SA_RESTORER)
 		sigtramp = ka->sa.sa_restorer;
 	else
arch/arm64/kernel/syscall.c:
@@ -98,6 +98,24 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	regs->orig_x0 = regs->regs[0];
 	regs->syscallno = scno;
 
+	/*
+	 * BTI note:
+	 * The architecture does not guarantee that SPSR.BTYPE is zero
+	 * on taking an SVC, so we could return to userspace with a
+	 * non-zero BTYPE after the syscall.
+	 *
+	 * This shouldn't matter except when userspace is explicitly
+	 * doing something stupid, such as setting PROT_BTI on a page
+	 * that lacks conforming BTI/PACIxSP instructions, falling
+	 * through from one executable page to another with differing
+	 * PROT_BTI, or messing with BTYPE via ptrace: in such cases,
+	 * userspace should not be surprised if a SIGILL occurs on
+	 * syscall return.
+	 *
+	 * So, don't touch regs->pstate & PSR_BTYPE_MASK here.
+	 * (Similarly for HVC and SMC elsewhere.)
+	 */
+
 	cortex_a76_erratum_1463225_svc_handler();
 	local_daif_restore(DAIF_PROCCTX);
 	user_exit();
arch/arm64/kernel/traps.c:
@@ -411,6 +411,13 @@ void do_undefinstr(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_undefinstr);
 
+void do_bti(struct pt_regs *regs)
+{
+	BUG_ON(!user_mode(regs));
+	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+}
+NOKPROBE_SYMBOL(do_bti);
+
 #define __user_cache_maint(insn, address, res)			\
 	if (address >= user_addr_max()) {			\
 		res = -EFAULT;					\
@@ -753,6 +760,7 @@ static const char *esr_class_str[] = {
 	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
 	[ESR_ELx_EC_PAC]		= "PAC",
 	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
+	[ESR_ELx_EC_BTI]		= "BTI",
 	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
 	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
 	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
include/linux/mm.h:
@@ -324,6 +324,9 @@ extern unsigned int kobjsize(const void *objp);
 #elif defined(CONFIG_SPARC64)
 # define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
 # define VM_ARCH_CLEAR	VM_SPARC_ADI
+#elif defined(CONFIG_ARM64)
+# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
+# define VM_ARCH_CLEAR	VM_ARM64_BTI
 #elif !defined(CONFIG_MMU)
 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 #endif