Commit bafab443 authored by Michael Ellerman, committed by Khalid Elmously

powerpc: Use barrier_nospec in copy_from_user()

BugLink: https://bugs.launchpad.net/bugs/1830176

commit ddf35cf3 upstream.

Based on the x86 commit doing the same.

See commit 304ec1b0 ("x86/uaccess: Use __uaccess_begin_nospec()
and uaccess_try_nospec") and b3bbfb3f ("x86: Introduce
__uaccess_begin_nospec() and uaccess_try_nospec") for more detail.

In all cases we are ordering the load from the potentially
user-controlled pointer vs a previous branch based on an access_ok()
check or similar.
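For illustration, the required ordering looks like this (a minimal
sketch, not code from this patch; read_user_int() and its arguments
are hypothetical):

	/*
	 * Sketch only: assumes <linux/uaccess.h> on a 4.4-era kernel,
	 * where access_ok() still takes VERIFY_READ.
	 */
	static int read_user_int(int __user *uptr, int *val)
	{
		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
			return -EFAULT;
		/*
		 * Without this barrier the CPU can speculatively issue
		 * the load below with an attacker-controlled uptr before
		 * the access_ok() branch resolves, leaking data through
		 * the cache.
		 */
		barrier_nospec();
		return __get_user(*val, uptr);
	}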

Based on a patch from Michal Suchanek.
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent cc8c40f3
@@ -269,6 +269,7 @@ do { \
 	__chk_user_ptr(ptr);					\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
 	__gu_err;						\
@@ -283,6 +284,7 @@ do { \
 	__chk_user_ptr(ptr);					\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
@@ -295,8 +297,10 @@ do { \
 	unsigned long __gu_val = 0;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
 	might_fault();						\
-	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
+	if (access_ok(VERIFY_READ, __gu_addr, (size))) {	\
+		barrier_nospec();				\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
 })
@@ -307,6 +311,7 @@ do { \
 	unsigned long __gu_val;					\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
 	__chk_user_ptr(ptr);					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
@@ -323,8 +328,10 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
-	if (likely(access_ok(VERIFY_READ, from, n)))
+	if (likely(access_ok(VERIFY_READ, from, n))) {
+		barrier_nospec();
 		return __copy_tofrom_user((__force void __user *)to, from, n);
+	}
 	memset(to, 0, n);
 	return n;
 }
@@ -359,21 +366,27 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 		switch (n) {
 		case 1:
+			barrier_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret);
 			break;
 		case 2:
+			barrier_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret);
 			break;
 		case 4:
+			barrier_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret);
 			break;
 		case 8:
+			barrier_nospec();
 			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
 		}
 		if (ret == 0)
 			return 0;
 	}
+	barrier_nospec();
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
@@ -400,6 +413,7 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 	if (ret == 0)
 		return 0;
 	}
 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }