Commit 4953fc3d authored by Tong Tiangen, committed by Will Deacon

arm64: extable: add new extable type EX_TYPE_KACCESS_ERR_ZERO support

Currently, the extable type EX_TYPE_UACCESS_ERR_ZERO is used by
__get/put_kernel_nofault(), but those helpers are not uaccess routines, so
add a new extable type EX_TYPE_KACCESS_ERR_ZERO which can be used by
__get/put_kernel_nofault().

This also prepares for distinguishing the two access types in the machine
check safe handling.
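
Note that fixup_exception() keeps handling both types through the same
handler for now; that handler writes -EFAULT into the recorded error
register, zeroes the recorded value register, and resumes at the fixup
label. A rough sketch of the existing arm64 handler (from
arch/arm64/mm/extable.c, shown here only for illustration, not part of
this diff):

  static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
                                          struct pt_regs *regs)
  {
          /* register numbers packed into ex->data by _ASM_EXTABLE_*_ERR_ZERO() */
          int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
          int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);

          pt_regs_write_reg(regs, reg_err, -EFAULT);  /* report the fault */
          pt_regs_write_reg(regs, reg_zero, 0);       /* zero the destination */

          regs->pc = get_ex_fixup(ex);                /* jump to the fixup label */
          return true;
  }

With the type split in place, a machine check safe path can later key off
ex->type to treat kernel and user accesses differently.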
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20220621072638.1273594-2-tongtiangen@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
parent a111daf0
@@ -6,7 +6,8 @@
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
#define EX_TYPE_UACCESS_ERR_ZERO 3
-#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+#define EX_TYPE_KACCESS_ERR_ZERO 4
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 5
#ifdef __ASSEMBLY__
@@ -73,9 +74,21 @@
EX_DATA_REG(ZERO, zero) \
")")
+#define _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, zero) \
+__DEFINE_ASM_GPR_NUMS \
+__ASM_EXTABLE_RAW(#insn, #fixup, \
+__stringify(EX_TYPE_KACCESS_ERR_ZERO), \
+"(" \
+EX_DATA_REG(ERR, err) " | " \
+EX_DATA_REG(ZERO, zero) \
+")")
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
+#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \
+_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
#define EX_DATA_REG_DATA_SHIFT 0
#define EX_DATA_REG_DATA GENMASK(4, 0)
#define EX_DATA_REG_ADDR_SHIFT 5
@@ -232,34 +232,34 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_mem_asm(load, reg, x, addr, err) \
+#define __get_mem_asm(load, reg, x, addr, err, type) \
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
-_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
+_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
: "+r" (err), "=&r" (x) \
: "r" (addr))
-#define __raw_get_mem(ldr, x, ptr, err) \
-do { \
-unsigned long __gu_val; \
-switch (sizeof(*(ptr))) { \
-case 1: \
-__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
-break; \
-case 2: \
-__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
-break; \
-case 4: \
-__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
-break; \
-case 8: \
-__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
-break; \
-default: \
-BUILD_BUG(); \
-} \
-(x) = (__force __typeof__(*(ptr)))__gu_val; \
+#define __raw_get_mem(ldr, x, ptr, err, type) \
+do { \
+unsigned long __gu_val; \
+switch (sizeof(*(ptr))) { \
+case 1: \
+__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \
+break; \
+case 2: \
+__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \
+break; \
+case 4: \
+__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type); \
+break; \
+case 8: \
+__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type); \
+break; \
+default: \
+BUILD_BUG(); \
+} \
+(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
/*
@@ -274,7 +274,7 @@ do { \
__chk_user_ptr(ptr); \
\
uaccess_ttbr0_enable(); \
__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U); \
uaccess_ttbr0_disable(); \
\
(x) = __rgu_val; \
@@ -314,40 +314,40 @@ do { \
\
__uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
-(__force type *)(__gkn_src), __gkn_err); \
+(__force type *)(__gkn_src), __gkn_err, K); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
-#define __put_mem_asm(store, reg, x, addr, err) \
+#define __put_mem_asm(store, reg, x, addr, err, type) \
asm volatile( \
"1: " store " " reg "1, [%2]\n" \
"2:\n" \
-_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
+_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
: "r" (x), "r" (addr))
-#define __raw_put_mem(str, x, ptr, err) \
-do { \
-__typeof__(*(ptr)) __pu_val = (x); \
-switch (sizeof(*(ptr))) { \
-case 1: \
-__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
-break; \
-case 2: \
-__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
-break; \
-case 4: \
-__put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
-break; \
-case 8: \
-__put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
-break; \
-default: \
-BUILD_BUG(); \
-} \
+#define __raw_put_mem(str, x, ptr, err, type) \
+do { \
+__typeof__(*(ptr)) __pu_val = (x); \
+switch (sizeof(*(ptr))) { \
+case 1: \
+__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
+break; \
+case 2: \
+__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
+break; \
+case 4: \
+__put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \
+break; \
+case 8: \
+__put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \
+break; \
+default: \
+BUILD_BUG(); \
+} \
} while (0)
/*
@@ -362,7 +362,7 @@ do { \
__chk_user_ptr(__rpu_ptr); \
\
uaccess_ttbr0_enable(); \
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \
uaccess_ttbr0_disable(); \
} while (0)
@@ -400,7 +400,7 @@ do { \
\
__uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
-(__force type *)(__pkn_dst), __pkn_err); \
+(__force type *)(__pkn_dst), __pkn_err, K); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
@@ -77,6 +77,7 @@ bool fixup_exception(struct pt_regs *regs)
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
+case EX_TYPE_KACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);