Commit ec97a729 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

 - Fix incorrect asm constraint for load_unaligned_zeropad() fixup

 - Fix thread flag update when setting TIF_MTE_ASYNC_FAULT

 - Fix restored irq state when handling fault on kprobe

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kprobes: Restore local irqflag if kprobes is cancelled
  arm64: mte: Ensure TIF_MTE_ASYNC_FAULT is set atomically
  arm64: fix inline asm in load_unaligned_zeropad()
parents c17a3066 738fa58e
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1406,10 +1406,13 @@ config ARM64_PAN
 config AS_HAS_LDAPR
 	def_bool $(as-instr,.arch_extension rcpc)
 
+config AS_HAS_LSE_ATOMICS
+	def_bool $(as-instr,.arch_extension lse)
+
 config ARM64_LSE_ATOMICS
 	bool
 	default ARM64_USE_LSE_ATOMICS
-	depends on $(as-instr,.arch_extension lse)
+	depends on AS_HAS_LSE_ATOMICS
 
 config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
@@ -1666,6 +1669,7 @@ config ARM64_MTE
 	default y
 	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
 	depends on AS_HAS_ARMV8_5
+	depends on AS_HAS_LSE_ATOMICS
 	# Required for tag checking in the uaccess routines
 	depends on ARM64_PAN
 	select ARCH_USES_HIGH_VMA_FLAGS
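The new AS_HAS_LSE_ATOMICS dependency on ARM64_MTE exists because the entry-code fix below stores the async-fault flag with the LSE `stset` instruction, so the assembler must accept `.arch_extension lse` wherever MTE code is built. As a rough illustration of what the $(as-instr,...) probe guards, here is a minimal C sketch (hypothetical helper, not part of the patch) that only assembles when the toolchain knows the LSE extension:

	/* Hypothetical probe: fails to assemble on pre-LSE binutils,
	 * which is exactly what $(as-instr,.arch_extension lse) tests. */
	static inline void lse_atomic_set_bits(unsigned long bits, unsigned long *p)
	{
		asm volatile(
		".arch_extension lse\n"
		"	stset	%1, %0\n"	/* atomic *p |= bits, result discarded */
		: "+Q" (*p)
		: "r" (bits)
		: "memory");
	}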
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
  */
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-	unsigned long ret, offset;
+	unsigned long ret, tmp;
 
 	/* Load word from unaligned pointer addr */
 	asm(
@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 	"2:\n"
 	"	.pushsection .fixup,\"ax\"\n"
 	"	.align 2\n"
-	"3:	and	%1, %2, #0x7\n"
-	"	bic	%2, %2, #0x7\n"
-	"	ldr	%0, [%2]\n"
+	"3:	bic	%1, %2, #0x7\n"
+	"	ldr	%0, [%1]\n"
+	"	and	%1, %2, #0x7\n"
 	"	lsl	%1, %1, #0x3\n"
 #ifndef __AARCH64EB__
 	"	lsr	%0, %0, %1\n"
@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 	"	b	2b\n"
 	"	.popsection\n"
 	_ASM_EXTABLE(1b, 3b)
-	: "=&r" (ret), "=&r" (offset)
+	: "=&r" (ret), "=&r" (tmp)
 	: "r" (addr), "Q" (*(unsigned long *)addr));
 
 	return ret;
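The constraint bug fixed above sits in the exception fixup path: the old code wrote to %2 with `bic %2, %2, #0x7`, but %2 is bound to the plain input operand "r" (addr), and the compiler is entitled to assume that input registers still hold their original values after the asm. The rewrite leaves addr untouched and does all scratch arithmetic in the early-clobber output tmp. A stripped-down sketch of the rule (hypothetical helper, not kernel code):

	/* Scratch work goes in an early-clobber output ("=&r"), never in an
	 * "r" input: the compiler may reuse the input register afterwards. */
	static inline unsigned long load_aligned_word(const void *addr)
	{
		unsigned long ret, tmp;

		asm("	bic	%1, %2, #0x7\n"	/* tmp = addr & ~7, addr untouched */
		    "	ldr	%0, [%1]\n"	/* ret = *(unsigned long *)tmp */
		    : "=&r" (ret), "=&r" (tmp)
		    : "r" (addr)
		    : "memory");
		return ret;
	}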
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -148,16 +148,18 @@ alternative_cb_end
 	.endm
 
 	/* Check for MTE asynchronous tag check faults */
-	.macro check_mte_async_tcf, flgs, tmp
+	.macro check_mte_async_tcf, tmp, ti_flags
 #ifdef CONFIG_ARM64_MTE
+	.arch_extension lse
 alternative_if_not ARM64_MTE
 	b	1f
 alternative_else_nop_endif
 	mrs_s	\tmp, SYS_TFSRE0_EL1
 	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
 	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
-	orr	\flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
-	str	\flgs, [tsk, #TSK_TI_FLAGS]
+	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
+	add	\ti_flags, tsk, #TSK_TI_FLAGS
+	stset	\tmp, [\ti_flags]
 	msr_s	SYS_TFSRE0_EL1, xzr
 1:
 #endif
@@ -244,7 +246,7 @@ alternative_else_nop_endif
 	disable_step_tsk x19, x20
 
 	/* Check for asynchronous tag check faults in user space */
-	check_mte_async_tcf x19, x22
+	check_mte_async_tcf x22, x23
 	apply_ssbd 1, x22, x23
 
 	ptrauth_keys_install_kernel tsk, x20, x22, x23
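The race fixed above: the old sequence loaded thread_info->flags into a register, OR-ed in _TIF_MTE_ASYNC_FAULT and stored the result back, so a flag set concurrently by another CPU (e.g. via set_tsk_thread_flag()) between the load and the store was silently lost. `stset` performs the OR atomically in a single store. In C11 terms the operation is roughly the following (illustrative sketch, not the kernel's code):

	#include <stdatomic.h>

	/* One atomic read-modify-write replaces the lossy load/orr/str. */
	static inline void set_ti_flag(_Atomic unsigned long *ti_flags,
				       unsigned long flag)
	{
		/* With -march=armv8.1-a this typically compiles to stset. */
		atomic_fetch_or_explicit(ti_flags, flag, memory_order_relaxed);
	}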
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 		if (!instruction_pointer(regs))
 			BUG();
 
-		if (kcb->kprobe_status == KPROBE_REENTER)
+		if (kcb->kprobe_status == KPROBE_REENTER) {
 			restore_previous_kprobe(kcb);
-		else
+		} else {
+			kprobes_restore_local_irqflag(kcb, regs);
 			reset_current_kprobe();
+		}
 
 		break;
 	case KPROBE_HIT_ACTIVE:
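Background for the kprobes change: arm64 saves the caller's interrupt state and masks IRQs before single-stepping the probed instruction. If the stepped instruction faults, the kprobe is cancelled, and before this fix the cancellation path reset the kprobe without restoring the saved state, leaving the task running with IRQs masked. The invariant is easiest to see in a stripped-down sketch (hypothetical types and names, not the kernel's):

	struct step_ctx {
		unsigned long saved_irqflag;	/* irq state at step setup */
		int stepping;
	};

	static void step_begin(struct step_ctx *ctx, unsigned long *pstate)
	{
		ctx->saved_irqflag = *pstate;
		*pstate |= (1UL << 7);		/* mask IRQs (PSTATE.I) for the step */
		ctx->stepping = 1;
	}

	/* Every way out of the step, including cancellation on a fault,
	 * must undo step_begin(); this restore was the missing piece. */
	static void step_cancel(struct step_ctx *ctx, unsigned long *pstate)
	{
		if (ctx->stepping) {
			*pstate = ctx->saved_irqflag;
			ctx->stepping = 0;
		}
	}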