Commit faf5b63e authored by Kees Cook

arm64/uaccess: Enable hardened usercopy

Enables CONFIG_HARDENED_USERCOPY checks on arm64. As done by KASAN in -next,
renames the low-level functions to __arch_copy_*_user() so a static inline
can do additional work before the copy.
Signed-off-by: Kees Cook <keescook@chromium.org>
parent dfd45b61
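
For context: the new static inline wrappers in the diff below funnel every copy through check_object_size() before handing off to the renamed __arch_copy_*_user() routines. A rough sketch of that hook, paraphrased from include/linux/thread_info.h of this era (the exact guard logic here is an approximation, not part of this commit) -- the key property is that it compiles away entirely when CONFIG_HARDENED_USERCOPY is disabled:

/*
 * Approximate paraphrase of check_object_size(), for reference only.
 * Not part of this diff.
 */
#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
				bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	/*
	 * Compile-time-constant sizes are skipped: those can be
	 * validated statically. Only runtime-sized copies pay for the
	 * object-bounds walk in mm/usercopy.c.
	 */
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif
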
arch/arm64/Kconfig

@@ -51,6 +51,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
arch/arm64/include/asm/uaccess.h

@@ -256,24 +256,39 @@ do {									\
 		-EFAULT;						\
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	check_object_size(from, n, true);
+	return __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
+	if (access_ok(VERIFY_READ, from, n)) {
+		check_object_size(to, n, false);
+		n = __arch_copy_from_user(to, from, n);
+	} else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		check_object_size(from, n, true);
+		n = __arch_copy_to_user(to, from, n);
+	}
 	return n;
 }
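
To see what the extra check buys, consider a hypothetical caller (example_read() is illustrative only, not part of this commit):

/*
 * Hypothetical caller, for illustration. 'len' is runtime-controlled,
 * so check_object_size() actually runs rather than being elided.
 */
static long example_read(char __user *buf, size_t len)
{
	char tmp[16];

	memset(tmp, 0, sizeof(tmp));	/* pretend this holds real data */

	/*
	 * copy_to_user() now calls check_object_size(tmp, len, true)
	 * before the low-level copy, so with hardened usercopy enabled
	 * a request with len > sizeof(tmp) is caught as a stack object
	 * overflow instead of silently leaking adjacent stack memory.
	 */
	return copy_to_user(buf, tmp, len) ? -EFAULT : 0;
}
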
arch/arm64/kernel/arm64ksyms.c

@@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 	/* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);
arch/arm64/lib/copy_from_user.S

@@ -66,7 +66,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0				// Nothing to copy
 	ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
arch/arm64/lib/copy_to_user.S

@@ -65,7 +65,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2