Commit 9dd819a1 authored by Kees Cook, committed by Linus Torvalds

uaccess: add missing __must_check attributes

The usercopy implementation comments describe that callers of the
copy_*_user() family of functions must always have their return values
checked.  This can be enforced at compile time with __must_check, so add
it where needed.

Link: http://lkml.kernel.org/r/201908251609.ADAD5CAAC1@keescook
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d5372c39
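
For context, the kernel's __must_check (include/linux/compiler_attributes.h) expands to the compiler's warn_unused_result attribute, so a caller that silently drops the return value of an annotated function gets a compile-time warning. A minimal standalone sketch of the effect, with the macro spelled out by hand and a toy stand-in for the copy routine (toy_copy_from_user is invented for illustration):

#include <string.h>

/* Same expansion the kernel uses in include/linux/compiler_attributes.h. */
#define __must_check __attribute__((__warn_unused_result__))

/*
 * Toy stand-in for copy_from_user(): following the kernel contract, it
 * returns the number of bytes NOT copied, so 0 means complete success.
 */
static __must_check unsigned long
toy_copy_from_user(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);	/* no faulting in this userspace toy */
	return 0;
}

int main(void)
{
	char src[8] = "data";
	char dst[8];

	toy_copy_from_user(dst, src, sizeof(dst));	/* warning: __must_check result ignored */

	if (toy_copy_from_user(dst, src, sizeof(dst)) != 0)	/* OK: result is checked */
		return 1;
	return 0;
}

GCC enables -Wunused-result by default, so even a plain gcc -c on this file flags the first call; the second compiles cleanly because the result feeds a branch.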
include/linux/thread_info.h
@@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
 	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-static __always_inline bool
+static __always_inline __must_check bool
 check_copy_size(const void *addr, size_t bytes, bool is_source)
 {
 	int sz = __compiletime_object_size(addr);
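
For reference, check_copy_size() is the compile-time size check consumed by the top-level copy_{to,from}_user() wrappers, which already carried __must_check; that is why this commit only annotates the lower-level helpers. The consuming wrapper in include/linux/uaccess.h of that era looked roughly like this (quoted from memory, so treat the exact form as approximate):

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}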
include/linux/uaccess.h
@@ -55,7 +55,7 @@
  * as usual) and both source and destination can trigger faults.
  */
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return raw_copy_from_user(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
  */
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 	return raw_copy_to_user(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 }
 
 #ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
 _copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
 	return res;
 }
 #else
-extern unsigned long
+extern __must_check unsigned long
 _copy_from_user(void *, const void __user *, unsigned long);
 #endif
 
 #ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
 _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 #else
-extern unsigned long
+extern __must_check unsigned long
 _copy_to_user(void __user *, const void *, unsigned long);
 #endif
 
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-				const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+				  unsigned long n)
 {
 	return __copy_from_user_inatomic(to, from, n);
 }
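
The caller-side contract the attribute enforces: every copy_*_user() variant returns the number of bytes left uncopied, and a nonzero result is conventionally mapped to -EFAULT. An illustrative kernel-style fragment (my_example_handler and struct my_args are hypothetical, not part of this commit):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical argument block, for illustration only. */
struct my_args {
	u32 flags;
	u64 addr;
};

static long my_example_handler(void __user *argp)
{
	struct my_args args;

	/* Nonzero return means bytes were left uncopied; ignoring it now warns. */
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	/* ... act on args ... */
	return 0;
}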