Commit 63722bbc authored by Ingo Molnar

Merge branch 'kcsan' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into locking/core

Pull v5.9 KCSAN bits from Paul E. McKenney.

Perhaps the most important change is that GCC 11 now has all fixes in place
to support KCSAN, so GCC support can be enabled again.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 28cff52e 61d56d7a
Documentation/dev-tools/kcsan.rst

@@ -8,7 +8,8 @@ approach to detect races. KCSAN's primary purpose is to detect `data races`_.
 Usage
 -----
 
-KCSAN requires Clang version 11 or later.
+KCSAN is supported by both GCC and Clang. With GCC we require version 11 or
+later, and with Clang also require version 11 or later.
 
 To enable KCSAN configure the kernel with::
arch/x86/mm/pat/set_memory.c

@@ -135,7 +135,7 @@ static inline void cpa_inc_2m_checked(void)
 
 static inline void cpa_inc_4k_install(void)
 {
-	cpa_4k_install++;
+	data_race(cpa_4k_install++);
 }
 
 static inline void cpa_inc_lp_sameprot(int level)
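The cpa_4k_install counter above is a best-effort statistic: concurrent increments may race, and an occasionally lost update is acceptable. data_race(), from <linux/compiler.h>, marks exactly this kind of intentional data race so that KCSAN stays silent while the access remains a plain, non-atomic operation. A minimal sketch of the idiom, with a hypothetical counter name::

    #include <linux/compiler.h>	/* data_race() */

    /* Hypothetical best-effort statistics counter; lost updates are tolerable. */
    static unsigned long stat_hits;

    static inline void stat_inc(void)
    {
            /* Plain racy increment, deliberately exempted from KCSAN reports. */
            data_race(stat_hits++);
    }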
include/linux/rculist.h

@@ -248,6 +248,8 @@ static inline void __list_splice_init_rcu(struct list_head *list,
 	 */
 	sync();
+	ASSERT_EXCLUSIVE_ACCESS(*first);
+	ASSERT_EXCLUSIVE_ACCESS(*last);
 
 	/*
 	 * Readers are finished with the source list, so perform splice.
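ASSERT_EXCLUSIVE_ACCESS(var), from <linux/kcsan-checks.h>, turns an ownership claim into a checkable assertion: if any other CPU or thread reads or writes var concurrently, KCSAN reports it as a race. Here it verifies that, after sync(), no readers can still be touching the first and last entries of the source list. A hedged sketch of the primitive, with hypothetical names::

    #include <linux/kcsan-checks.h>
    #include <linux/string.h>

    struct buffer { int data[64]; };

    /* Hypothetical helper: the caller must own @buf exclusively here. */
    static void buffer_reset(struct buffer *buf)
    {
            ASSERT_EXCLUSIVE_ACCESS(*buf);	/* any concurrent access is a bug */
            memset(buf, 0, sizeof(*buf));
    }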
kernel/fork.c

@@ -359,7 +359,13 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 
 	if (new) {
-		*new = *orig;
+		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
+		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
+		/*
+		 * orig->shared.rb may be modified concurrently, but the clone
+		 * will be reinitialized.
+		 */
+		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
 		new->vm_next = new->vm_prev = NULL;
 	}
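ASSERT_EXCLUSIVE_WRITER(var) is the weaker claim: concurrent readers of var are fine, but a concurrent writer is a bug. In vm_area_dup() above it pins down which fields of orig must be stable during the copy, while the struct-wide copy itself is wrapped in data_race() because orig->shared.rb may legitimately change underneath it. A small sketch with hypothetical names::

    #include <linux/kcsan-checks.h>

    /* Hypothetical object: flags may be read concurrently, but only the owner writes. */
    struct obj { unsigned long flags; };

    static void obj_set_flag(struct obj *o, unsigned long f)
    {
            ASSERT_EXCLUSIVE_WRITER(o->flags);	/* readers OK, other writers are not */
            o->flags |= f;
    }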
kernel/kcsan/Makefile

@@ -7,8 +7,11 @@ CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
 
-CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
-	$(call cc-option,-fno-stack-protector,)
+CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
+	-fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 obj-y := core.o debugfs.o report.o
-obj-$(CONFIG_KCSAN_SELFTEST) += test.o
+obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
+
+CFLAGS_kcsan-test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
+obj-$(CONFIG_KCSAN_TEST) += kcsan-test.o
kernel/kcsan/atomic.h

@@ -3,8 +3,7 @@
 #ifndef _KERNEL_KCSAN_ATOMIC_H
 #define _KERNEL_KCSAN_ATOMIC_H
 
-#include <linux/jiffies.h>
-#include <linux/sched.h>
+#include <linux/types.h>
 
 /*
  * Special rules for certain memory where concurrent conflicting accesses are
@@ -13,8 +12,7 @@
  */
 static bool kcsan_is_atomic_special(const volatile void *ptr)
 {
-	/* volatile globals that have been observed in data races. */
-	return ptr == &jiffies || ptr == &current->state;
+	return false;
 }
 
 #endif /* _KERNEL_KCSAN_ATOMIC_H */
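With kcsan_is_atomic_special() reduced to return false, the blanket runtime exemption for jiffies and current->state is gone: racy accesses to such variables are now expected to be marked at each access site rather than whitelisted by address. A minimal sketch of the usual marking, with a hypothetical flag::

    #include <linux/compiler.h>
    #include <asm/processor.h>	/* cpu_relax() */

    /* Hypothetical flag set by one thread and polled by another. */
    static int done;

    static void producer(void)
    {
            WRITE_ONCE(done, 1);		/* marked write: no KCSAN report */
    }

    static void consumer(void)
    {
            while (!READ_ONCE(done))	/* marked read: no KCSAN report */
                    cpu_relax();
    }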
kernel/kcsan/core.c

@@ -776,6 +776,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
  */
 #define DEFINE_TSAN_READ_WRITE(size) \
+	void __tsan_read##size(void *ptr); \
 	void __tsan_read##size(void *ptr) \
 	{ \
 		check_access(ptr, size, 0); \
@@ -784,6 +785,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
 	void __tsan_unaligned_read##size(void *ptr) \
 		__alias(__tsan_read##size); \
 	EXPORT_SYMBOL(__tsan_unaligned_read##size); \
+	void __tsan_write##size(void *ptr); \
 	void __tsan_write##size(void *ptr) \
 	{ \
 		check_access(ptr, size, KCSAN_ACCESS_WRITE); \
@@ -799,12 +801,14 @@ DEFINE_TSAN_READ_WRITE(4);
 DEFINE_TSAN_READ_WRITE(8);
 DEFINE_TSAN_READ_WRITE(16);
 
+void __tsan_read_range(void *ptr, size_t size);
 void __tsan_read_range(void *ptr, size_t size)
 {
 	check_access(ptr, size, 0);
 }
 EXPORT_SYMBOL(__tsan_read_range);
 
+void __tsan_write_range(void *ptr, size_t size);
 void __tsan_write_range(void *ptr, size_t size)
 {
 	check_access(ptr, size, KCSAN_ACCESS_WRITE);
@@ -821,6 +825,7 @@ EXPORT_SYMBOL(__tsan_write_range);
  * the size-check of compiletime_assert_rwonce_type().
  */
 #define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
+	void __tsan_volatile_read##size(void *ptr); \
 	void __tsan_volatile_read##size(void *ptr) \
 	{ \
 		const bool is_atomic = size <= sizeof(long long) && \
@@ -833,6 +838,7 @@ EXPORT_SYMBOL(__tsan_write_range);
 	void __tsan_unaligned_volatile_read##size(void *ptr) \
 		__alias(__tsan_volatile_read##size); \
 	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
+	void __tsan_volatile_write##size(void *ptr); \
 	void __tsan_volatile_write##size(void *ptr) \
 	{ \
 		const bool is_atomic = size <= sizeof(long long) && \
@@ -858,14 +864,17 @@ DEFINE_TSAN_VOLATILE_READ_WRITE(16);
  * The below are not required by KCSAN, but can still be emitted by the
  * compiler.
  */
+void __tsan_func_entry(void *call_pc);
 void __tsan_func_entry(void *call_pc)
 {
 }
 EXPORT_SYMBOL(__tsan_func_entry);
 
+void __tsan_func_exit(void);
 void __tsan_func_exit(void)
 {
 }
 EXPORT_SYMBOL(__tsan_func_exit);
 
+void __tsan_init(void);
 void __tsan_init(void)
 {
 }
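The repeated pattern in this hunk, a declaration immediately preceding each definition, exists because the __tsan_* hooks are called only by compiler-instrumented code and thus have no header of their own; the explicit prototypes keep -Wmissing-prototypes quiet now that GCC builds are supported. The idiom in isolation, with a hypothetical hook name::

    /*
     * No header declares this compiler-invoked hook, so declare it right
     * before the definition to satisfy -Wmissing-prototypes.
     */
    void __example_hook(void *ptr);
    void __example_hook(void *ptr)
    {
            /* ... */
    }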
(collapsed diff: new file kernel/kcsan/kcsan-test.c, the KCSAN KUnit test, not shown)
kernel/locking/osq_lock.c

@@ -154,7 +154,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 */
 
 	for (;;) {
-		if (prev->next == node &&
+		/*
+		 * cpu_relax() below implies a compiler barrier which would
+		 * prevent this comparison being optimized away.
+		 */
+		if (data_race(prev->next) == node &&
 		    cmpxchg(&prev->next, node, NULL) == node)
 			break;
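The osq_lock() change is the check-then-cmpxchg idiom: the first comparison is only a racy fast-path hint, so it is wrapped in data_race(), and correctness rests entirely on the cmpxchg(), which revalidates the pointer atomically. Roughly, with hypothetical names::

    #include <linux/atomic.h>
    #include <linux/compiler.h>

    struct node { struct node *next; };

    /*
     * Try to unlink @n from after @prev. The racy peek merely avoids a
     * pointless cmpxchg(); the cmpxchg() itself re-checks atomically.
     */
    static bool try_unlink(struct node *prev, struct node *n)
    {
            return data_race(prev->next) == n &&
                   cmpxchg(&prev->next, n, NULL) == n;
    }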
lib/Kconfig.kcsan

@@ -4,7 +4,8 @@ config HAVE_ARCH_KCSAN
 	bool
 
 config HAVE_KCSAN_COMPILER
-	def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)
+	def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)) || \
+		 (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-distinguish-volatile=1))
 	help
 	  For the list of compilers that support KCSAN, please see
 	  <file:Documentation/dev-tools/kcsan.rst>.
@@ -59,7 +60,28 @@ config KCSAN_SELFTEST
 	bool "Perform short selftests on boot"
 	default y
 	help
-	  Run KCSAN selftests on boot. On test failure, causes the kernel to panic.
+	  Run KCSAN selftests on boot. On test failure, causes the kernel to
+	  panic. Recommended to be enabled, ensuring critical functionality
+	  works as intended.
+
+config KCSAN_TEST
+	tristate "KCSAN test for integrated runtime behaviour"
+	depends on TRACEPOINTS && KUNIT
+	select TORTURE_TEST
+	help
+	  KCSAN test focusing on behaviour of the integrated runtime. Tests
+	  various race scenarios, and verifies the reports generated to
+	  console. Makes use of KUnit for test organization, and the Torture
+	  framework for test thread control.
+
+	  Each test case may run at least up to KCSAN_REPORT_ONCE_IN_MS
+	  milliseconds. Test run duration may be optimized by building the
+	  kernel and KCSAN test with KCSAN_REPORT_ONCE_IN_MS set to a lower
+	  than default value.
+
+	  Say Y here if you want the test to be built into the kernel and run
+	  during boot; say M if you want the test to build as a module; say N
+	  if you are unsure.
 
 config KCSAN_EARLY_ENABLE
 	bool "Early enable during boot"
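CONFIG_KCSAN_TEST builds on KUnit, so the test module follows the standard KUnit shape. A bare-bones skeleton of that shape (not the actual kcsan-test contents; all names hypothetical)::

    #include <kunit/test.h>

    static void race_smoke_test(struct kunit *test)
    {
            /* The real cases set up racing threads and match the console report. */
            KUNIT_EXPECT_TRUE(test, true);
    }

    static struct kunit_case race_test_cases[] = {
            KUNIT_CASE(race_smoke_test),
            {}
    };

    static struct kunit_suite race_test_suite = {
            .name = "race-smoke",
            .test_cases = race_test_cases,
    };
    kunit_test_suite(race_test_suite);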
scripts/Makefile.kcsan

@@ -6,7 +6,7 @@ ifdef CONFIG_KCSAN
 ifdef CONFIG_CC_IS_CLANG
 cc-param = -mllvm -$(1)
 else
-cc-param = --param -$(1)
+cc-param = --param $(1)
 endif
 
 # Keep most options here optional, to allow enabling more compilers if absence