Commit ff901d80 authored by Alexander Potapenko, committed by Andrew Morton

x86: kmsan: use __msan_ string functions where possible.

Unless stated otherwise (by explicitly calling __memcpy(), __memset(), or
__memmove()), we want all string functions to call their __msan_ versions
(e.g. __msan_memcpy() instead of memcpy()), so that shadow and origin
values are updated accordingly.

The bootloader must still use the default string functions to avoid crashes.
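
As a minimal illustration of the redirection described above (this mirrors the preprocessor pattern in the diff below rather than the verbatim kernel headers; the copy_example() helper is hypothetical):

  #include <stddef.h>

  /* As in the diff below: the __msan_ variant updates KMSAN's shadow and
   * origin metadata, the double-underscore variant never does. */
  void *__msan_memcpy(void *dst, const void *src, size_t size);
  void *__memcpy(void *to, const void *from, size_t len);

  #if defined(__SANITIZE_MEMORY__)
  /* Instrumented translation unit: plain memcpy() becomes __msan_memcpy(). */
  #undef memcpy
  #define memcpy __msan_memcpy
  #else
  /* Uninstrumented build: keep the ordinary declaration. */
  void *memcpy(void *to, const void *from, size_t len);
  #endif

  /* Hypothetical helper showing both spellings side by side. */
  static inline void copy_example(void *dst, const void *src, size_t len)
  {
  	memcpy(dst, src, len);   /* instrumented: shadow and origin follow the data */
  	__memcpy(dst, src, len); /* explicit bypass: copies data only */
  }

Files excluded from instrumentation (and the boot code mentioned above) take the __memcpy()/__memset()/__memmove() path, so no KMSAN metadata is touched there.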

Link: https://lkml.kernel.org/r/20220915150417.722975-36-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Marco Elver <elver@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9245ec01
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -11,11 +11,23 @@
    function. */

 #define __HAVE_ARCH_MEMCPY 1
+#if defined(__SANITIZE_MEMORY__)
+#undef memcpy
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+#define memcpy __msan_memcpy
+#else
 extern void *memcpy(void *to, const void *from, size_t len);
+#endif
 extern void *__memcpy(void *to, const void *from, size_t len);

 #define __HAVE_ARCH_MEMSET
+#if defined(__SANITIZE_MEMORY__)
+extern void *__msan_memset(void *s, int c, size_t n);
+#undef memset
+#define memset __msan_memset
+#else
 void *memset(void *s, int c, size_t n);
+#endif
 void *__memset(void *s, int c, size_t n);

 #define __HAVE_ARCH_MEMSET16
@@ -55,7 +67,13 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
 }

 #define __HAVE_ARCH_MEMMOVE
+#if defined(__SANITIZE_MEMORY__)
+#undef memmove
+void *__msan_memmove(void *dest, const void *src, size_t len);
+#define memmove __msan_memmove
+#else
 void *memmove(void *dest, const void *src, size_t count);
+#endif
 void *__memmove(void *dest, const void *src, size_t count);

 int memcmp(const void *cs, const void *ct, size_t count);
@@ -64,8 +82,7 @@ char *strcpy(char *dest, const char *src);
 char *strcat(char *dest, const char *src);
 int strcmp(const char *cs, const char *ct);

-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) || \
+	(defined(CONFIG_KMSAN) && !defined(__SANITIZE_MEMORY__))
 /*
  * For files that not instrumented (e.g. mm/slub.c) we
  * should use not instrumented version of mem* functions.
@@ -73,7 +90,9 @@ int strcmp(const char *cs, const char *ct);

 #undef memcpy
 #define memcpy(dst, src, len) __memcpy(dst, src, len)
+#undef memmove
 #define memmove(dst, src, len) __memmove(dst, src, len)
+#undef memset
 #define memset(s, c, n) __memset(s, c, n)

 #ifndef __NO_FORTIFY
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -285,8 +285,10 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
  * __builtin_object_size() must be captured here to avoid evaluating argument
  * side-effects further into the macro layers.
  */
+#ifndef CONFIG_KMSAN
 #define memset(p, c, s) __fortify_memset_chk(p, c, s,			\
 		__builtin_object_size(p, 0), __builtin_object_size(p, 1))
+#endif

 /*
  * To make sure the compiler can enforce protection against buffer overflows,