Commit 75cf0290 authored by Alexander Potapenko, committed by Andrew Morton

instrumented.h: add KMSAN support

To avoid false positives, KMSAN needs to unpoison the data copied from
userspace.  To detect infoleaks, check the memory buffer passed to
copy_to_user().
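
For illustration, the class of infoleak the copy_to_user() check catches
looks like this (hypothetical driver code, not part of this patch):

	struct stats {
		u32 packets;
		u32 pad;	/* never written: leaks 4 bytes of kernel stack */
	};

	static long stats_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
	{
		struct stats s;

		s.packets = 42;	/* s.pad stays uninitialized */
		/* KMSAN reports the uninitialized bytes reaching userspace. */
		if (copy_to_user((void __user *)arg, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	}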

Link: https://lkml.kernel.org/r/20220915150417.722975-19-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3c206509
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -2,7 +2,7 @@
 /*
  * This header provides generic wrappers for memory access instrumentation that
- * the compiler cannot emit for: KASAN, KCSAN.
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
  */
 #ifndef _LINUX_INSTRUMENTED_H
 #define _LINUX_INSTRUMENTED_H
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/kasan-checks.h>
 #include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
 #include <linux/types.h>
 
 /**
@@ -117,6 +118,7 @@ instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
 	kcsan_check_read(from, n);
+	kmsan_copy_to_user(to, from, n, 0);
 }
 
 /**
@@ -151,6 +153,7 @@ static __always_inline void
 instrument_copy_from_user_after(const void *to, const void __user *from,
 				unsigned long n, unsigned long left)
 {
+	kmsan_unpoison_memory(to, n - left);
 }
 
 /**
@@ -162,10 +165,14 @@ instrument_copy_from_user_after(const void *to, const void __user *from,
  *
  * @to destination variable, may not be address-taken
  */
 #define instrument_get_user(to)				\
 ({							\
+	u64 __tmp = (u64)(to);				\
+	kmsan_unpoison_memory(&__tmp, sizeof(__tmp));	\
+	to = __tmp;					\
 })
 
 /**
  * instrument_put_user() - add instrumentation to put_user()-like macros
  *
@@ -177,8 +184,9 @@ instrument_copy_from_user_after(const void *to, const void __user *from,
  * @ptr userspace pointer to copy to
  * @size number of bytes to copy
  */
 #define instrument_put_user(from, ptr, size)		\
 ({							\
+	kmsan_copy_to_user(ptr, &from, sizeof(from), 0);	\
 })
 
 #endif /* _LINUX_INSTRUMENTED_H */
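
For context, a simplified sketch of how a _copy_from_user()-style caller
drives these wrappers; raw_copy_from_user() returns the number of bytes it
failed to copy (simplified, details of include/linux/uaccess.h elided):

	static unsigned long copy_from_user_sketch(void *to,
						   const void __user *from,
						   unsigned long n)
	{
		unsigned long left;

		left = raw_copy_from_user(to, from, n);
		/*
		 * Unpoisons only the n - left bytes actually written, so a
		 * short copy leaves the tail of the buffer poisoned.
		 */
		instrument_copy_from_user_after(to, from, n, left);
		return left;
	}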
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
--- a/include/linux/kmsan-checks.h
+++ b/include/linux/kmsan-checks.h
@@ -46,6 +46,21 @@ void kmsan_unpoison_memory(const void *address, size_t size);
  */
 void kmsan_check_memory(const void *address, size_t size);
 
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to:      destination address in the userspace.
+ * @from:    source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left:    number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+			size_t left);
+
 #else
 
 static inline void kmsan_poison_memory(const void *address, size_t size,
@@ -58,6 +73,10 @@ static inline void kmsan_unpoison_memory(const void *address, size_t size)
 static inline void kmsan_check_memory(const void *address, size_t size)
 {
 }
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+				      size_t to_copy, size_t left)
+{
+}
 
 #endif
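
The @left convention matches the usercopy primitives, which return the
number of bytes *not* copied; a usage sketch (assumed caller, not part of
this patch):

	static void publish(void __user *uaddr, const void *kbuf, size_t n)
	{
		size_t left = raw_copy_to_user(uaddr, kbuf, n);

		/*
		 * Only the n - left bytes that reached userspace are checked;
		 * the empty !CONFIG_KMSAN stub above makes this call safe
		 * without any #ifdefs in the caller.
		 */
		kmsan_copy_to_user(uaddr, kbuf, n, left);
	}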
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -205,6 +205,44 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
 	kmsan_leave_runtime();
 }
 
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+			size_t left)
+{
+	unsigned long ua_flags;
+
+	if (!kmsan_enabled || kmsan_in_runtime())
+		return;
+	/*
+	 * At this point we've copied the memory already. It's hard to check it
+	 * before copying, as the size of actually copied buffer is unknown.
+	 */
+
+	/* copy_to_user() may copy zero bytes. No need to check. */
+	if (!to_copy)
+		return;
+	/* Or maybe copy_to_user() failed to copy anything. */
+	if (to_copy <= left)
+		return;
+
+	ua_flags = user_access_save();
+	if ((u64)to < TASK_SIZE) {
+		/* This is a user memory access, check it. */
+		kmsan_internal_check_memory((void *)from, to_copy - left, to,
+					    REASON_COPY_TO_USER);
+	} else {
+		/* Otherwise this is a kernel memory access. This happens when a
+		 * compat syscall passes an argument allocated on the kernel
+		 * stack to a real syscall.
+		 * Don't check anything, just copy the shadow of the copied
+		 * bytes.
+		 */
+		kmsan_internal_memmove_metadata((void *)to, (void *)from,
+						to_copy - left);
+	}
+	user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(kmsan_copy_to_user);
+
 /* Functions from kmsan-checks.h follow. */
 void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
 {
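
The (u64)to < TASK_SIZE test distinguishes real userspace destinations from
kernel buffers smuggled through __user-typed parameters; a contrived
illustration, with struct native_arg, convert_from_compat() and
do_native_call() as hypothetical names:

	/* Hypothetical compat shim, for illustration only. */
	static long compat_shim(void __user *uarg)
	{
		struct native_arg karg;	/* kernel stack, possibly partly uninit */

		convert_from_compat(&karg, uarg);
		/*
		 * When do_native_call() later copy_to_user()s results through
		 * this pointer, 'to' is a kernel address ((u64)to >= TASK_SIZE),
		 * so KMSAN copies the shadow metadata along with the data
		 * instead of reporting a false infoleak.
		 */
		return do_native_call((void __user *)&karg);
	}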