Commit e0f7a8d5 authored by Catalin Marinas

Merge branch 'for-next/uaccess' into for-next/core

* for-next/uaccess:
  : uaccess routines clean-up and set_fs() removal
  arm64: mark __system_matches_cap as __maybe_unused
  arm64: uaccess: remove vestigal UAO support
  arm64: uaccess: remove redundant PAN toggling
  arm64: uaccess: remove addr_limit_user_check()
  arm64: uaccess: remove set_fs()
  arm64: uaccess: cleanup macro naming
  arm64: uaccess: split user/kernel routines
  arm64: uaccess: refactor __{get,put}_user
  arm64: uaccess: simplify __copy_user_flushcache()
  arm64: uaccess: rename privileged uaccess routines
  arm64: sdei: explicitly simulate PAN/UAO entry
  arm64: sdei: move uaccess logic to arch/arm64/
  arm64: head.S: always initialize PSTATE
  arm64: head.S: cleanup SCTLR_ELx initialization
  arm64: head.S: rename el2_setup -> init_kernel_el
  arm64: add C wrappers for SET_PSTATE_*()
  arm64: ensure ERET from kthread is illegal
parents 3c09ec59 701f4906
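
For context on what the branch removes: before this series, kernel code could call set_fs(KERNEL_DS) to widen the thread's address limit so that the uaccess routines would accept kernel pointers, and had to restore the previous limit afterwards. Below is a minimal sketch of that now-removed pattern (illustrative only; kernel_read_compat() is a hypothetical caller, not part of this diff). After the series, arm64 has no addr_limit at all: user pointers are always accessed with the unprivileged LDTR/STTR instructions, and kernel buffers with ordinary loads and stores.

/*
 * Sketch of the historical set_fs() pattern this series removes
 * (not taken from this diff; kernel_read_compat() is made up).
 */
#include <linux/uaccess.h>

static int kernel_read_compat(void *dst, const void *src, size_t n)
{
	mm_segment_t old_fs = get_fs();
	int ret = 0;

	set_fs(KERNEL_DS);	/* let uaccess accept kernel pointers */
	if (copy_from_user(dst, (const void __user *)src, n))
		ret = -EFAULT;
	set_fs(old_fs);		/* forgetting this restore was a classic bug */
	return ret;
}

Callers of this pattern elsewhere in the tree had to be converted to proper kernel-buffer interfaces before architecture code like this could drop set_fs() and the addr_limit field entirely.
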
...@@ -195,7 +195,6 @@ config ARM64 ...@@ -195,7 +195,6 @@ config ARM64
select PCI_SYSCALL if PCI select PCI_SYSCALL if PCI
select POWER_RESET select POWER_RESET
select POWER_SUPPLY select POWER_SUPPLY
select SET_FS
select SPARSE_IRQ select SPARSE_IRQ
select SWIOTLB select SWIOTLB
select SYSCTL_EXCEPTION_TRACE select SYSCTL_EXCEPTION_TRACE
...@@ -1428,27 +1427,6 @@ endmenu ...@@ -1428,27 +1427,6 @@ endmenu
menu "ARMv8.2 architectural features" menu "ARMv8.2 architectural features"
config ARM64_UAO
bool "Enable support for User Access Override (UAO)"
default y
help
User Access Override (UAO; part of the ARMv8.2 Extensions)
causes the 'unprivileged' variant of the load/store instructions to
be overridden to be privileged.
This option changes get_user() and friends to use the 'unprivileged'
variant of the load/store instructions. This ensures that user-space
really did have access to the supplied memory. When addr_limit is
set to kernel memory the UAO bit will be set, allowing privileged
access to kernel memory.
Choosing this option will cause copy_to_user() et al to use user-space
memory permissions.
The feature is detected at runtime, the kernel will use the
regular load/store instructions if the cpu does not implement the
feature.
config ARM64_PMEM config ARM64_PMEM
bool "Enable support for persistent memory" bool "Enable support for persistent memory"
select ARCH_HAS_PMEM_API select ARCH_HAS_PMEM_API
......
...@@ -59,62 +59,32 @@ alternative_else_nop_endif ...@@ -59,62 +59,32 @@ alternative_else_nop_endif
#endif #endif
/* /*
* Generate the assembly for UAO alternatives with exception table entries. * Generate the assembly for LDTR/STTR with exception table entries.
* This is complicated as there is no post-increment or pair versions of the * This is complicated as there is no post-increment or pair versions of the
* unprivileged instructions, and USER() only works for single instructions. * unprivileged instructions, and USER() only works for single instructions.
*/ */
#ifdef CONFIG_ARM64_UAO .macro user_ldp l, reg1, reg2, addr, post_inc
.macro uao_ldp l, reg1, reg2, addr, post_inc 8888: ldtr \reg1, [\addr];
alternative_if_not ARM64_HAS_UAO 8889: ldtr \reg2, [\addr, #8];
8888: ldp \reg1, \reg2, [\addr], \post_inc; add \addr, \addr, \post_inc;
8889: nop;
nop;
alternative_else
ldtr \reg1, [\addr];
ldtr \reg2, [\addr, #8];
add \addr, \addr, \post_inc;
alternative_endif
_asm_extable 8888b,\l; _asm_extable 8888b,\l;
_asm_extable 8889b,\l; _asm_extable 8889b,\l;
.endm .endm
.macro uao_stp l, reg1, reg2, addr, post_inc .macro user_stp l, reg1, reg2, addr, post_inc
alternative_if_not ARM64_HAS_UAO 8888: sttr \reg1, [\addr];
8888: stp \reg1, \reg2, [\addr], \post_inc; 8889: sttr \reg2, [\addr, #8];
8889: nop; add \addr, \addr, \post_inc;
nop;
alternative_else
sttr \reg1, [\addr];
sttr \reg2, [\addr, #8];
add \addr, \addr, \post_inc;
alternative_endif
_asm_extable 8888b,\l; _asm_extable 8888b,\l;
_asm_extable 8889b,\l; _asm_extable 8889b,\l;
.endm .endm
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc .macro user_ldst l, inst, reg, addr, post_inc
alternative_if_not ARM64_HAS_UAO 8888: \inst \reg, [\addr];
8888: \inst \reg, [\addr], \post_inc; add \addr, \addr, \post_inc;
nop;
alternative_else
\alt_inst \reg, [\addr];
add \addr, \addr, \post_inc;
alternative_endif
_asm_extable 8888b,\l; _asm_extable 8888b,\l;
.endm .endm
#else
.macro uao_ldp l, reg1, reg2, addr, post_inc
USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
.endm
.macro uao_stp l, reg1, reg2, addr, post_inc
USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
.endm
.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
USER(\l, \inst \reg, [\addr], \post_inc)
.endm
#endif
#endif #endif
...@@ -16,8 +16,6 @@ ...@@ -16,8 +16,6 @@
#define ARM64_WORKAROUND_CAVIUM_23154 6 #define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7 #define ARM64_WORKAROUND_834220 7
#define ARM64_HAS_NO_HW_PREFETCH 8 #define ARM64_HAS_NO_HW_PREFETCH 8
#define ARM64_HAS_UAO 9
#define ARM64_ALT_PAN_NOT_UAO 10
#define ARM64_HAS_VIRT_HOST_EXTN 11 #define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12 #define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13 #define ARM64_HAS_32BIT_EL0 13
......
...@@ -667,10 +667,16 @@ static __always_inline bool system_supports_fpsimd(void) ...@@ -667,10 +667,16 @@ static __always_inline bool system_supports_fpsimd(void)
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
} }
static inline bool system_uses_hw_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_PAN) &&
cpus_have_const_cap(ARM64_HAS_PAN);
}
static inline bool system_uses_ttbr0_pan(void) static inline bool system_uses_ttbr0_pan(void)
{ {
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
!cpus_have_const_cap(ARM64_HAS_PAN); !system_uses_hw_pan();
} }
static __always_inline bool system_supports_sve(void) static __always_inline bool system_supports_sve(void)
...@@ -762,6 +768,13 @@ static inline bool cpu_has_hw_af(void) ...@@ -762,6 +768,13 @@ static inline bool cpu_has_hw_af(void)
ID_AA64MMFR1_HADBS_SHIFT); ID_AA64MMFR1_HADBS_SHIFT);
} }
static inline bool cpu_has_pan(void)
{
u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_PAN_SHIFT);
}
#ifdef CONFIG_ARM64_AMU_EXTN #ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */ /* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu); extern bool cpu_has_amu_feat(int cpu);
......
...@@ -10,6 +10,5 @@ ...@@ -10,6 +10,5 @@
#include <linux/sched.h> #include <linux/sched.h>
extern unsigned long arch_align_stack(unsigned long sp); extern unsigned long arch_align_stack(unsigned long sp);
void uao_thread_switch(struct task_struct *next);
#endif /* __ASM_EXEC_H */ #endif /* __ASM_EXEC_H */
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
do { \ do { \
unsigned int loops = FUTEX_MAX_LOOPS; \ unsigned int loops = FUTEX_MAX_LOOPS; \
\ \
uaccess_enable(); \ uaccess_enable_privileged(); \
asm volatile( \ asm volatile( \
" prfm pstl1strm, %2\n" \ " prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \ "1: ldxr %w1, %2\n" \
...@@ -39,7 +39,7 @@ do { \ ...@@ -39,7 +39,7 @@ do { \
"+r" (loops) \ "+r" (loops) \
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \ : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
: "memory"); \ : "memory"); \
uaccess_disable(); \ uaccess_disable_privileged(); \
} while (0) } while (0)
static inline int static inline int
...@@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, ...@@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
return -EFAULT; return -EFAULT;
uaddr = __uaccess_mask_ptr(_uaddr); uaddr = __uaccess_mask_ptr(_uaddr);
uaccess_enable(); uaccess_enable_privileged();
asm volatile("// futex_atomic_cmpxchg_inatomic\n" asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n" " prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n" "1: ldxr %w1, %2\n"
...@@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, ...@@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN) : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
: "memory"); : "memory");
uaccess_disable(); uaccess_disable_privileged();
if (!ret) if (!ret)
*uval = val; *uval = val;
......
...@@ -8,9 +8,6 @@ ...@@ -8,9 +8,6 @@
#ifndef __ASM_PROCESSOR_H #ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H
#define KERNEL_DS UL(-1)
#define USER_DS ((UL(1) << VA_BITS) - 1)
/* /*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
* no point in shifting all network buffers by 2 bytes just to make some IP * no point in shifting all network buffers by 2 bytes just to make some IP
...@@ -48,6 +45,7 @@ ...@@ -48,6 +45,7 @@
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN) #define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64 (UL(1) << vabits_actual) #define TASK_SIZE_64 (UL(1) << vabits_actual)
#define TASK_SIZE_MAX (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
......
...@@ -16,6 +16,11 @@ ...@@ -16,6 +16,11 @@
#define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2) #define CurrentEL_EL2 (2 << 2)
#define INIT_PSTATE_EL1 \
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
#define INIT_PSTATE_EL2 \
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
/* /*
* PMR values used to mask/unmask interrupts. * PMR values used to mask/unmask interrupts.
* *
...@@ -188,8 +193,7 @@ struct pt_regs { ...@@ -188,8 +193,7 @@ struct pt_regs {
s32 syscallno; s32 syscallno;
u32 unused2; u32 unused2;
#endif #endif
u64 sdei_ttbr1;
u64 orig_addr_limit;
/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
u64 pmr_save; u64 pmr_save;
u64 stackframe[2]; u64 stackframe[2];
......
...@@ -98,6 +98,10 @@ ...@@ -98,6 +98,10 @@
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) #define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift)) #define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \ #define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
...@@ -578,6 +582,9 @@ ...@@ -578,6 +582,9 @@
#define ENDIAN_SET_EL2 0 #define ENDIAN_SET_EL2 0
#endif #endif
#define INIT_SCTLR_EL2_MMU_OFF \
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
/* SCTLR_EL1 specific flags. */ /* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_ATA0 (BIT(42)) #define SCTLR_EL1_ATA0 (BIT(42))
...@@ -611,12 +618,15 @@ ...@@ -611,12 +618,15 @@
#define ENDIAN_SET_EL1 0 #define ENDIAN_SET_EL1 0
#endif #endif
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ #define INIT_SCTLR_EL1_MMU_OFF \
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\ (ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ #define INIT_SCTLR_EL1_MMU_ON \
SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */ /* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
......
...@@ -18,14 +18,11 @@ struct task_struct; ...@@ -18,14 +18,11 @@ struct task_struct;
#include <asm/stack_pointer.h> #include <asm/stack_pointer.h>
#include <asm/types.h> #include <asm/types.h>
typedef unsigned long mm_segment_t;
/* /*
* low level task data that entry.S needs immediate access to. * low level task data that entry.S needs immediate access to.
*/ */
struct thread_info { struct thread_info {
unsigned long flags; /* low level flags */ unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN #ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */ u64 ttbr0; /* saved TTBR0_EL1 */
#endif #endif
...@@ -66,8 +63,7 @@ void arch_release_task_struct(struct task_struct *tsk); ...@@ -66,8 +63,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
...@@ -93,7 +89,6 @@ void arch_release_task_struct(struct task_struct *tsk); ...@@ -93,7 +89,6 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE) #define _TIF_SVE (1 << TIF_SVE)
...@@ -101,7 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk); ...@@ -101,7 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
_TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT) _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
...@@ -119,7 +114,6 @@ void arch_release_task_struct(struct task_struct *tsk); ...@@ -119,7 +114,6 @@ void arch_release_task_struct(struct task_struct *tsk);
{ \ { \
.flags = _TIF_FOREIGN_FPSTATE, \ .flags = _TIF_FOREIGN_FPSTATE, \
.preempt_count = INIT_PREEMPT_COUNT, \ .preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
INIT_SCS \ INIT_SCS \
} }
......
...@@ -277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void) ...@@ -277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void)
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \ #define __user_swpX_asm(data, addr, res, temp, temp2, B) \
do { \ do { \
uaccess_enable(); \ uaccess_enable_privileged(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
" mov %w3, %w7\n" \ " mov %w3, %w7\n" \
"0: ldxr"B" %w2, [%4]\n" \ "0: ldxr"B" %w2, [%4]\n" \
...@@ -302,7 +302,7 @@ do { \ ...@@ -302,7 +302,7 @@ do { \
"i" (-EFAULT), \ "i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \ "i" (__SWP_LL_SC_LOOPS) \
: "memory"); \ : "memory"); \
uaccess_disable(); \ uaccess_disable_privileged(); \
} while (0) } while (0)
#define __user_swp_asm(data, addr, res, temp, temp2) \ #define __user_swp_asm(data, addr, res, temp, temp2) \
......
...@@ -30,7 +30,6 @@ int main(void) ...@@ -30,7 +30,6 @@ int main(void)
BLANK(); BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN #ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
#endif #endif
...@@ -70,7 +69,7 @@ int main(void) ...@@ -70,7 +69,7 @@ int main(void)
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_PC, offsetof(struct pt_regs, pc)); DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
......
...@@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys); ...@@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
.width = 0, \ .width = 0, \
} }
/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
static bool __system_matches_cap(unsigned int n); static bool __system_matches_cap(unsigned int n);
...@@ -1605,7 +1601,7 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) ...@@ -1605,7 +1601,7 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
WARN_ON_ONCE(in_interrupt()); WARN_ON_ONCE(in_interrupt());
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
asm(SET_PSTATE_PAN(1)); set_pstate_pan(1);
} }
#endif /* CONFIG_ARM64_PAN */ #endif /* CONFIG_ARM64_PAN */
...@@ -1775,28 +1771,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -1775,28 +1771,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_no_hw_prefetch, .matches = has_no_hw_prefetch,
}, },
#ifdef CONFIG_ARM64_UAO
{
.desc = "User Access Override",
.capability = ARM64_HAS_UAO,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
.min_field_value = 1,
/*
* We rely on stop_machine() calling uao_thread_switch() to set
* UAO immediately after patching.
*/
},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
{
.capability = ARM64_ALT_PAN_NOT_UAO,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = cpufeature_pan_not_uao,
},
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_VHE #ifdef CONFIG_ARM64_VHE
{ {
.desc = "Virtualization Host Extensions", .desc = "Virtualization Host Extensions",
...@@ -2667,7 +2641,7 @@ bool this_cpu_has_cap(unsigned int n) ...@@ -2667,7 +2641,7 @@ bool this_cpu_has_cap(unsigned int n)
* - The SYSTEM_FEATURE cpu_hwcaps may not have been set. * - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
* In all other cases cpus_have_{const_}cap() should be used. * In all other cases cpus_have_{const_}cap() should be used.
*/ */
static bool __system_matches_cap(unsigned int n) static bool __maybe_unused __system_matches_cap(unsigned int n)
{ {
if (n < ARM64_NCAPS) { if (n < ARM64_NCAPS) {
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
...@@ -2747,12 +2721,6 @@ void __init setup_cpu_features(void) ...@@ -2747,12 +2721,6 @@ void __init setup_cpu_features(void)
ARCH_DMA_MINALIGN); ARCH_DMA_MINALIGN);
} }
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
}
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{ {
cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
......
...@@ -216,12 +216,6 @@ alternative_else_nop_endif ...@@ -216,12 +216,6 @@ alternative_else_nop_endif
.else .else
add x21, sp, #S_FRAME_SIZE add x21, sp, #S_FRAME_SIZE
get_current_task tsk get_current_task tsk
/* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */ .endif /* \el == 0 */
mrs x22, elr_el1 mrs x22, elr_el1
mrs x23, spsr_el1 mrs x23, spsr_el1
...@@ -279,12 +273,6 @@ alternative_else_nop_endif ...@@ -279,12 +273,6 @@ alternative_else_nop_endif
.macro kernel_exit, el .macro kernel_exit, el
.if \el != 0 .if \el != 0
disable_daif disable_daif
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif .endif
/* Restore pmr */ /* Restore pmr */
...@@ -999,10 +987,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline) ...@@ -999,10 +987,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
mov x4, xzr mov x4, xzr
/* /*
* Use reg->interrupted_regs.addr_limit to remember whether to unmap * Remember whether to unmap the kernel on exit.
* the kernel on exit.
*/ */
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
adr x4, tramp_vectors + PAGE_SIZE adr x4, tramp_vectors + PAGE_SIZE
...@@ -1023,7 +1010,7 @@ NOKPROBE(__sdei_asm_entry_trampoline) ...@@ -1023,7 +1010,7 @@ NOKPROBE(__sdei_asm_entry_trampoline)
* x4: struct sdei_registered_event argument from registration time. * x4: struct sdei_registered_event argument from registration time.
*/ */
SYM_CODE_START(__sdei_asm_exit_trampoline) SYM_CODE_START(__sdei_asm_exit_trampoline)
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
cbnz x4, 1f cbnz x4, 1f
tramp_unmap_kernel tmp=x4 tramp_unmap_kernel tmp=x4
......
...@@ -89,7 +89,7 @@ ...@@ -89,7 +89,7 @@
*/ */
SYM_CODE_START(primary_entry) SYM_CODE_START(primary_entry)
bl preserve_boot_args bl preserve_boot_args
bl el2_setup // Drop to EL1, w0=cpu_boot_mode bl init_kernel_el // w0=cpu_boot_mode
adrp x23, __PHYS_OFFSET adrp x23, __PHYS_OFFSET
and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
bl set_cpu_boot_mode_flag bl set_cpu_boot_mode_flag
...@@ -467,24 +467,33 @@ EXPORT_SYMBOL(kimage_vaddr) ...@@ -467,24 +467,33 @@ EXPORT_SYMBOL(kimage_vaddr)
.section ".idmap.text","awx" .section ".idmap.text","awx"
/* /*
* If we're fortunate enough to boot at EL2, ensure that the world is * Starting from EL2 or EL1, configure the CPU to execute at the highest
* sane before dropping to EL1. * reachable EL supported by the kernel in a chosen default state. If dropping
* from EL2 to EL1, configure EL2 before configuring EL1.
*
* Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
* SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
* *
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
* booted in EL1 or EL2 respectively. * booted in EL1 or EL2 respectively.
*/ */
SYM_FUNC_START(el2_setup) SYM_FUNC_START(init_kernel_el)
msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2 cmp x0, #CurrentEL_EL2
b.eq 1f b.eq init_el2
mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
msr sctlr_el1, x0 msr sctlr_el1, x0
mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
isb isb
ret mov_q x0, INIT_PSTATE_EL1
msr spsr_el1, x0
msr elr_el1, lr
mov w0, #BOOT_CPU_MODE_EL1
eret
1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
msr sctlr_el2, x0 msr sctlr_el2, x0
#ifdef CONFIG_ARM64_VHE #ifdef CONFIG_ARM64_VHE
...@@ -593,9 +602,12 @@ set_hcr: ...@@ -593,9 +602,12 @@ set_hcr:
cbz x2, install_el2_stub cbz x2, install_el2_stub
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
isb isb
ret mov_q x0, INIT_PSTATE_EL2
msr spsr_el2, x0
msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2
eret
SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
/* /*
...@@ -605,7 +617,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) ...@@ -605,7 +617,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
* requires no configuration, and all non-hyp-specific EL2 setup * requires no configuration, and all non-hyp-specific EL2 setup
* will be done via the _EL1 system register aliases in __cpu_setup. * will be done via the _EL1 system register aliases in __cpu_setup.
*/ */
mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) mov_q x0, INIT_SCTLR_EL1_MMU_OFF
msr sctlr_el1, x0 msr sctlr_el1, x0
/* Coprocessor traps. */ /* Coprocessor traps. */
...@@ -627,14 +639,13 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) ...@@ -627,14 +639,13 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
7: adr_l x0, __hyp_stub_vectors 7: adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0 msr vbar_el2, x0
/* spsr */ isb
mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ mov x0, #INIT_PSTATE_EL1
PSR_MODE_EL1h)
msr spsr_el2, x0 msr spsr_el2, x0
msr elr_el2, lr msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 mov w0, #BOOT_CPU_MODE_EL2
eret eret
SYM_FUNC_END(el2_setup) SYM_FUNC_END(init_kernel_el)
/* /*
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
...@@ -684,7 +695,7 @@ SYM_DATA_END(__early_cpu_boot_status) ...@@ -684,7 +695,7 @@ SYM_DATA_END(__early_cpu_boot_status)
* cores are held until we're ready for them to initialise. * cores are held until we're ready for them to initialise.
*/ */
SYM_FUNC_START(secondary_holding_pen) SYM_FUNC_START(secondary_holding_pen)
bl el2_setup // Drop to EL1, w0=cpu_boot_mode bl init_kernel_el // w0=cpu_boot_mode
bl set_cpu_boot_mode_flag bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1 mrs x0, mpidr_el1
mov_q x1, MPIDR_HWID_BITMASK mov_q x1, MPIDR_HWID_BITMASK
...@@ -702,7 +713,7 @@ SYM_FUNC_END(secondary_holding_pen) ...@@ -702,7 +713,7 @@ SYM_FUNC_END(secondary_holding_pen)
* be used where CPUs are brought online dynamically by the kernel. * be used where CPUs are brought online dynamically by the kernel.
*/ */
SYM_FUNC_START(secondary_entry) SYM_FUNC_START(secondary_entry)
bl el2_setup // Drop to EL1 bl init_kernel_el // w0=cpu_boot_mode
bl set_cpu_boot_mode_flag bl set_cpu_boot_mode_flag
b secondary_startup b secondary_startup
SYM_FUNC_END(secondary_entry) SYM_FUNC_END(secondary_entry)
......
...@@ -422,16 +422,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -422,16 +422,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
p->thread.uw.tp_value = tls; p->thread.uw.tp_value = tls;
} else { } else {
/*
* A kthread has no context to ERET to, so ensure any buggy
* ERET is treated as an illegal exception return.
*
* When a user task is created from a kthread, childregs will
* be initialized by start_thread() or start_compat_thread().
*/
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h; childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
spectre_v4_enable_task_mitigation(p);
if (system_uses_irq_prio_masking())
childregs->pmr_save = GIC_PRIO_IRQON;
p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz; p->thread.cpu_context.x20 = stk_sz;
...@@ -461,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next) ...@@ -461,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next)
write_sysreg(*task_user_tls(next), tpidr_el0); write_sysreg(*task_user_tls(next), tpidr_el0);
} }
/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
if (IS_ENABLED(CONFIG_ARM64_UAO)) {
if (task_thread_info(next)->addr_limit == KERNEL_DS)
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
else
asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
}
}
/* /*
* Force SSBS state on context-switch, since it may be lost after migrating * Force SSBS state on context-switch, since it may be lost after migrating
* from a CPU which treats the bit as RES0 in a heterogeneous system. * from a CPU which treats the bit as RES0 in a heterogeneous system.
...@@ -555,7 +543,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, ...@@ -555,7 +543,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
hw_breakpoint_thread_switch(next); hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next); contextidr_thread_switch(next);
entry_task_switch(next); entry_task_switch(next);
uao_thread_switch(next);
ssbs_thread_switch(next); ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next); erratum_1418040_thread_switch(prev, next);
......
...@@ -538,12 +538,12 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void) ...@@ -538,12 +538,12 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
if (spectre_v4_mitigations_off()) { if (spectre_v4_mitigations_off()) {
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
asm volatile(SET_PSTATE_SSBS(1)); set_pstate_ssbs(1);
return SPECTRE_VULNERABLE; return SPECTRE_VULNERABLE;
} }
/* SCTLR_EL1.DSSBS was initialised to 0 during boot */ /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
asm volatile(SET_PSTATE_SSBS(0)); set_pstate_ssbs(0);
return SPECTRE_MITIGATED; return SPECTRE_MITIGATED;
} }
......
...@@ -178,12 +178,6 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, ...@@ -178,12 +178,6 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
sdei_api_event_context(i, &regs->regs[i]); sdei_api_event_context(i, &regs->regs[i]);
} }
/*
* We didn't take an exception to get here, set PAN. UAO will be cleared
* by sdei_event_handler()s force_uaccess_begin() call.
*/
__uaccess_enable_hw_pan();
err = sdei_event_handler(regs, arg); err = sdei_event_handler(regs, arg);
if (err) if (err)
return SDEI_EV_FAILED; return SDEI_EV_FAILED;
...@@ -222,12 +216,39 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, ...@@ -222,12 +216,39 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
return vbar + 0x480; return vbar + 0x480;
} }
static void __kprobes notrace __sdei_pstate_entry(void)
{
/*
* The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
* whether PSTATE bits are inherited unchanged or generated from
* scratch, and the TF-A implementation always clears PAN and always
* clears UAO. There are no other known implementations.
*
* Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
* PSTATE is modified upon architectural exceptions, and so PAN is
* either inherited or set per SCTLR_ELx.SPAN, and UAO is always
* cleared.
*
* We must explicitly reset PAN to the expected state, including
* clearing it when the host isn't using it, in case a VM had it set.
*/
if (system_uses_hw_pan())
set_pstate_pan(1);
else if (cpu_has_pan())
set_pstate_pan(0);
}
asmlinkage __kprobes notrace unsigned long asmlinkage __kprobes notrace unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{ {
unsigned long ret; unsigned long ret;
/*
* We didn't take an exception to get here, so the HW hasn't
* set/cleared bits in PSTATE that we may rely on. Initialize PAN.
*/
__sdei_pstate_entry();
nmi_enter(); nmi_enter();
ret = _sdei_handler(regs, arg); ret = _sdei_handler(regs, arg);
......
...@@ -922,9 +922,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, ...@@ -922,9 +922,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
trace_hardirqs_off(); trace_hardirqs_off();
do { do {
/* Check valid user FS if needed */
addr_limit_user_check();
if (thread_flags & _TIF_NEED_RESCHED) { if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */ /* Unmask Debug and SError for the next task */
local_daif_restore(DAIF_PROCCTX_NOIRQ); local_daif_restore(DAIF_PROCCTX_NOIRQ);
......
...@@ -99,7 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter) ...@@ -99,7 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx" .pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume) SYM_CODE_START(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly bl init_kernel_el
bl __cpu_setup bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */ /* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir adrp x1, swapper_pg_dir
......
...@@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void) ...@@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void)
* features that might not have been set correctly. * features that might not have been set correctly.
*/ */
__uaccess_enable_hw_pan(); __uaccess_enable_hw_pan();
uao_thread_switch(current);
/* /*
* Restore HW breakpoint registers to sane values * Restore HW breakpoint registers to sane values
......
...@@ -24,20 +24,20 @@ SYM_FUNC_START(__arch_clear_user) ...@@ -24,20 +24,20 @@ SYM_FUNC_START(__arch_clear_user)
subs x1, x1, #8 subs x1, x1, #8
b.mi 2f b.mi 2f
1: 1:
uao_user_alternative 9f, str, sttr, xzr, x0, 8 user_ldst 9f, sttr, xzr, x0, 8
subs x1, x1, #8 subs x1, x1, #8
b.pl 1b b.pl 1b
2: adds x1, x1, #4 2: adds x1, x1, #4
b.mi 3f b.mi 3f
uao_user_alternative 9f, str, sttr, wzr, x0, 4 user_ldst 9f, sttr, wzr, x0, 4
sub x1, x1, #4 sub x1, x1, #4
3: adds x1, x1, #2 3: adds x1, x1, #2
b.mi 4f b.mi 4f
uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 user_ldst 9f, sttrh, wzr, x0, 2
sub x1, x1, #2 sub x1, x1, #2
4: adds x1, x1, #1 4: adds x1, x1, #1
b.mi 5f b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 user_ldst 9f, sttrb, wzr, x0, 0
5: mov x0, #0 5: mov x0, #0
ret ret
SYM_FUNC_END(__arch_clear_user) SYM_FUNC_END(__arch_clear_user)
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
*/ */
.macro ldrb1 reg, ptr, val .macro ldrb1 reg, ptr, val
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val user_ldst 9998f, ldtrb, \reg, \ptr, \val
.endm .endm
.macro strb1 reg, ptr, val .macro strb1 reg, ptr, val
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
.endm .endm
.macro ldrh1 reg, ptr, val .macro ldrh1 reg, ptr, val
uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val user_ldst 9998f, ldtrh, \reg, \ptr, \val
.endm .endm
.macro strh1 reg, ptr, val .macro strh1 reg, ptr, val
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
.endm .endm
.macro ldr1 reg, ptr, val .macro ldr1 reg, ptr, val
uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val user_ldst 9998f, ldtr, \reg, \ptr, \val
.endm .endm
.macro str1 reg, ptr, val .macro str1 reg, ptr, val
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
.endm .endm
.macro ldp1 reg1, reg2, ptr, val .macro ldp1 reg1, reg2, ptr, val
uao_ldp 9998f, \reg1, \reg2, \ptr, \val user_ldp 9998f, \reg1, \reg2, \ptr, \val
.endm .endm
.macro stp1 reg1, reg2, ptr, val .macro stp1 reg1, reg2, ptr, val
......
...@@ -22,35 +22,35 @@ ...@@ -22,35 +22,35 @@
* x0 - bytes not copied * x0 - bytes not copied
*/ */
.macro ldrb1 reg, ptr, val .macro ldrb1 reg, ptr, val
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val user_ldst 9998f, ldtrb, \reg, \ptr, \val
.endm .endm
.macro strb1 reg, ptr, val .macro strb1 reg, ptr, val
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm .endm
.macro ldrh1 reg, ptr, val .macro ldrh1 reg, ptr, val
uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val user_ldst 9998f, ldtrh, \reg, \ptr, \val
.endm .endm
.macro strh1 reg, ptr, val .macro strh1 reg, ptr, val
uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val user_ldst 9998f, sttrh, \reg, \ptr, \val
.endm .endm
.macro ldr1 reg, ptr, val .macro ldr1 reg, ptr, val
uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val user_ldst 9998f, ldtr, \reg, \ptr, \val
.endm .endm
.macro str1 reg, ptr, val .macro str1 reg, ptr, val
uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val user_ldst 9998f, sttr, \reg, \ptr, \val
.endm .endm
.macro ldp1 reg1, reg2, ptr, val .macro ldp1 reg1, reg2, ptr, val
uao_ldp 9998f, \reg1, \reg2, \ptr, \val user_ldp 9998f, \reg1, \reg2, \ptr, \val
.endm .endm
.macro stp1 reg1, reg2, ptr, val .macro stp1 reg1, reg2, ptr, val
uao_stp 9998f, \reg1, \reg2, \ptr, \val user_stp 9998f, \reg1, \reg2, \ptr, \val
.endm .endm
end .req x5 end .req x5
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
.endm .endm
.macro strb1 reg, ptr, val .macro strb1 reg, ptr, val
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm .endm
.macro ldrh1 reg, ptr, val .macro ldrh1 reg, ptr, val
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
.endm .endm
.macro strh1 reg, ptr, val .macro strh1 reg, ptr, val
uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val user_ldst 9998f, sttrh, \reg, \ptr, \val
.endm .endm
.macro ldr1 reg, ptr, val .macro ldr1 reg, ptr, val
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
.endm .endm
.macro str1 reg, ptr, val .macro str1 reg, ptr, val
uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val user_ldst 9998f, sttr, \reg, \ptr, \val
.endm .endm
.macro ldp1 reg1, reg2, ptr, val .macro ldp1 reg1, reg2, ptr, val
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
.endm .endm
.macro stp1 reg1, reg2, ptr, val .macro stp1 reg1, reg2, ptr, val
uao_stp 9998f, \reg1, \reg2, \ptr, \val user_stp 9998f, \reg1, \reg2, \ptr, \val
.endm .endm
end .req x5 end .req x5
......
...@@ -67,7 +67,7 @@ SYM_FUNC_START(mte_copy_tags_from_user) ...@@ -67,7 +67,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
mov x3, x1 mov x3, x1
cbz x2, 2f cbz x2, 2f
1: 1:
uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0 user_ldst 2f, ldtrb, w4, x1, 0
lsl x4, x4, #MTE_TAG_SHIFT lsl x4, x4, #MTE_TAG_SHIFT
stg x4, [x0], #MTE_GRANULE_SIZE stg x4, [x0], #MTE_GRANULE_SIZE
add x1, x1, #1 add x1, x1, #1
...@@ -94,7 +94,7 @@ SYM_FUNC_START(mte_copy_tags_to_user) ...@@ -94,7 +94,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
1: 1:
ldg x4, [x1] ldg x4, [x1]
ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
uao_user_alternative 2f, strb, sttrb, w4, x0, 0 user_ldst 2f, sttrb, w4, x0, 0
add x0, x0, #1 add x0, x0, #1
add x1, x1, #MTE_GRANULE_SIZE add x1, x1, #MTE_GRANULE_SIZE
subs x2, x2, #1 subs x2, x2, #1
......
...@@ -30,9 +30,7 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from, ...@@ -30,9 +30,7 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
{ {
unsigned long rc; unsigned long rc;
uaccess_enable_not_uao(); rc = raw_copy_from_user(to, from, n);
rc = __arch_copy_from_user(to, from, n);
uaccess_disable_not_uao();
/* See above */ /* See above */
__clean_dcache_area_pop(to, n - rc); __clean_dcache_area_pop(to, n - rc);
......
...@@ -482,11 +482,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr, ...@@ -482,11 +482,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
} }
if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
addr, esr, regs);
if (is_el1_instruction_abort(esr)) if (is_el1_instruction_abort(esr))
die_kernel_fault("execution of user memory", die_kernel_fault("execution of user memory",
addr, esr, regs); addr, esr, regs);
......
...@@ -489,6 +489,6 @@ SYM_FUNC_START(__cpu_setup) ...@@ -489,6 +489,6 @@ SYM_FUNC_START(__cpu_setup)
/* /*
* Prepare SCTLR * Prepare SCTLR
*/ */
mov_q x0, SCTLR_EL1_SET mov_q x0, INIT_SCTLR_EL1_MMU_ON
ret // return to head.S ret // return to head.S
SYM_FUNC_END(__cpu_setup) SYM_FUNC_END(__cpu_setup)
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/uaccess.h>
/* /*
* The call to use to reach the firmware. * The call to use to reach the firmware.
...@@ -1092,26 +1091,13 @@ int sdei_event_handler(struct pt_regs *regs, ...@@ -1092,26 +1091,13 @@ int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg) struct sdei_registered_event *arg)
{ {
int err; int err;
mm_segment_t orig_addr_limit;
u32 event_num = arg->event_num; u32 event_num = arg->event_num;
/*
* Save restore 'fs'.
* The architecture's entry code save/restores 'fs' when taking an
* exception from the kernel. This ensures addr_limit isn't inherited
* if you interrupted something that allowed the uaccess routines to
* access kernel memory.
* Do the same here because this doesn't come via the same entry code.
*/
orig_addr_limit = force_uaccess_begin();
err = arg->callback(event_num, regs, arg->callback_arg); err = arg->callback(event_num, regs, arg->callback_arg);
if (err) if (err)
pr_err_ratelimited("event %u on CPU %u failed with error: %d\n", pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
event_num, smp_processor_id(), err); event_num, smp_processor_id(), err);
force_uaccess_end(orig_addr_limit);
return err; return err;
} }
NOKPROBE_SYMBOL(sdei_event_handler); NOKPROBE_SYMBOL(sdei_event_handler);