Commit 70544196 authored by James Morse, committed by Catalin Marinas

arm64: kernel: Don't toggle PAN on systems with UAO

If a CPU supports both Privileged Access Never (PAN) and User Access
Override (UAO), we don't need to disable/re-enable PAN around all
copy_to_user()-like calls.

UAO alternatives cause these calls to use the 'unprivileged' load/store
instructions, which are overridden to be the privileged kind when
fs==KERNEL_DS.
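
A minimal user-space sketch of that behaviour (illustrative only, not
kernel code; the helper name uao_access_level is invented here):

/*
 * Illustrative model: with UAO, set_fs(KERNEL_DS) sets PSTATE.UAO, so the
 * nominally unprivileged LDTR/STTR instructions patched in by the uao
 * alternatives behave like privileged LDR/STR.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *uao_access_level(bool fs_is_kernel_ds)
{
	return fs_is_kernel_ds ? "privileged (PSTATE.UAO set)"
			       : "unprivileged (normal user permission checks)";
}

int main(void)
{
	printf("fs == USER_DS   -> ldtr/sttr run %s\n", uao_access_level(false));
	printf("fs == KERNEL_DS -> ldtr/sttr run %s\n", uao_access_level(true));
	return 0;
}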

This patch changes the copy_to_user() calls to have their PAN toggling
depend on a new composite 'feature' ARM64_ALT_PAN_NOT_UAO.

If both features are detected, PAN will be enabled, but the copy_to_user()
alternatives will not be applied. This means PAN will be enabled all the
time for these functions. If only PAN is detected, the toggling will be
enabled as normal.
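
A small user-space model of the resulting capability logic (a sketch only;
the function name alt_pan_not_uao is invented here, and UAO-without-PAN is
omitted because UAO implies a PAN-capable CPU):

/* Model of cpufeature_pan_not_uao(): the PAN-toggling alternatives are
 * applied only when the CPUs have PAN but not UAO. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool alt_pan_not_uao(bool has_pan, bool has_uao)
{
	return has_pan && !has_uao;
}

int main(void)
{
	static const struct { bool pan, uao; } cpus[] = {
		{ false, false },	/* neither feature */
		{ true,  false },	/* PAN only */
		{ true,  true  },	/* PAN and UAO */
	};

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("PAN=%d UAO=%d -> copy_to_user() %s\n",
		       cpus[i].pan, cpus[i].uao,
		       alt_pan_not_uao(cpus[i].pan, cpus[i].uao) ?
				"toggles PAN around the access" :
				"leaves PSTATE.PAN untouched");
	return 0;
}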

This will save the time taken to disable/re-enable PAN, and allow us to
catch copy_to_user() accesses that occur with fs==KERNEL_DS.
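
The new check in do_page_fault() (last hunk below) implements that catch.
A simplified model of the added policy (illustrative only; the function
name classify_perm_fault is invented here, and only permission faults on
user addresses reach this check):

#include <stdbool.h>
#include <stdio.h>

static const char *classify_perm_fault(bool fs_is_kernel_ds,
					bool pc_in_exception_table)
{
	if (fs_is_kernel_ds)
		return "panic: Accessing user space memory with fs=KERNEL_DS";
	if (!pc_in_exception_table)
		return "panic: Accessing user space memory outside uaccess.h routines";
	return "fixed up via the exception table (ordinary uaccess fault)";
}

int main(void)
{
	printf("KERNEL_DS, any PC        -> %s\n", classify_perm_fault(true, true));
	printf("USER_DS, PC not in table -> %s\n", classify_perm_fault(false, false));
	printf("USER_DS, PC in table     -> %s\n", classify_perm_fault(false, true));
	return 0;
}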

The futex and swp-emulation code continue to hang their PAN toggling on
ARM64_HAS_PAN.
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 644c2ae1
arch/arm64/include/asm/cpufeature.h
@@ -32,8 +32,9 @@
 #define ARM64_WORKAROUND_834220		7
 #define ARM64_HAS_NO_HW_PREFETCH	8
 #define ARM64_HAS_UAO			9
+#define ARM64_ALT_PAN_NOT_UAO		10
 
-#define ARM64_NCAPS			10
+#define ARM64_NCAPS			11
 
 #ifndef __ASSEMBLY__
arch/arm64/include/asm/uaccess.h
@@ -145,7 +145,7 @@ static inline void set_fs(mm_segment_t fs)
 do {								\
 	unsigned long __gu_val;					\
 	__chk_user_ptr(ptr);					\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));			\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
@@ -168,7 +168,7 @@ do { \
 		BUILD_BUG();					\
 	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));			\
 } while (0)
@@ -217,7 +217,7 @@ do { \
 do {								\
 	__typeof__(*(ptr)) __pu_val = (x);			\
 	__chk_user_ptr(ptr);					\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));			\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
@@ -239,7 +239,7 @@ do { \
 	default:						\
 		BUILD_BUG();					\
 	}							\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));			\
 } while (0)
arch/arm64/kernel/cpufeature.c
@@ -67,6 +67,10 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 		.width = 0,				\
 	}
 
+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
@@ -688,6 +692,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.enable = cpu_enable_uao,
 	},
 #endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+	{
+		.capability = ARM64_ALT_PAN_NOT_UAO,
+		.matches = cpufeature_pan_not_uao,
+	},
+#endif /* CONFIG_ARM64_PAN */
 	{},
 };
@@ -966,3 +976,9 @@ void __init setup_cpu_features(void)
 		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
 			L1_CACHE_BYTES, cls);
 }
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+{
+	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+}
arch/arm64/lib/clear_user.S
@@ -33,7 +33,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
@@ -54,7 +54,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	b.mi	5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
arch/arm64/lib/copy_from_user.S
@@ -67,11 +67,11 @@
 end	.req	x5
 ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0			// Nothing to copy
 	ret
arch/arm64/lib/copy_in_user.S
@@ -68,11 +68,11 @@
 end	.req	x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
arch/arm64/lib/copy_to_user.S
@@ -66,11 +66,11 @@
 end	.req	x5
 ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
arch/arm64/mm/fault.c
@@ -234,6 +234,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	}
 
 	if (permission_fault(esr) && (addr < USER_DS)) {
+		if (get_thread_info(regs->sp)->addr_limit == KERNEL_DS)
+			panic("Accessing user space memory with fs=KERNEL_DS");
+
 		if (!search_exception_tables(regs->pc))
 			panic("Accessing user space memory outside uaccess.h routines");
 	}