Commit 37d1a04b authored by Thomas Gleixner

Rebase locking/kcsan to locking/urgent

Merge the state of the locking/kcsan branch from before the read/write_once()
and atomics modifications were merged.

Squash the fallout of the rebase on top of the read/write_once() and atomic
fallback work into the merge. The history of the original branch is
preserved in the tag locking-kcsan-2020-06-02.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parents 37f8173d 97a9474a
......@@ -21,6 +21,7 @@ whole; patches welcome!
kasan
ubsan
kmemleak
kcsan
gdb-kernel-debugging
kgdb
kselftest
......
......@@ -9305,6 +9305,17 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
KCSAN
M: Marco Elver <elver@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kcsan.rst
F: include/linux/kcsan*.h
F: kernel/kcsan/
F: lib/Kconfig.kcsan
F: scripts/Makefile.kcsan
KDUMP
M: Dave Young <dyoung@redhat.com>
M: Baoquan He <bhe@redhat.com>
......
......@@ -531,7 +531,7 @@ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN CFLAGS_KCSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
......@@ -965,6 +965,7 @@ endif
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
include scripts/Makefile.kcsan
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
......
......@@ -233,6 +233,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
......
......@@ -9,7 +9,9 @@
# Changed by many, many contributors over the years.
#
# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Kernel does not boot with kcov instrumentation here.
......
......@@ -17,7 +17,9 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
......
......@@ -10,8 +10,11 @@ ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE
include $(srctree)/lib/vdso/Makefile
KBUILD_CFLAGS += $(DISABLE_LTO)
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
......@@ -29,6 +32,9 @@ vobjs32-y += vdso32/vclock_gettime.o
# files to link into kernel
obj-y += vma.o
KASAN_SANITIZE_vma.o := y
UBSAN_SANITIZE_vma.o := y
KCSAN_SANITIZE_vma.o := y
OBJECT_FILES_NON_STANDARD_vma.o := n
# vDSO images to build
......
......@@ -201,8 +201,12 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
-static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
/*
* Because this is a plain access, we need to disable KCSAN here to
* avoid double instrumentation via instrumented bitops.
*/
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
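For illustration, the double instrumentation the comment refers to: the asm-generic instrumented wrapper (shown later in this diff) already emits the KCSAN check before dispatching to the arch implementation, so the plain load inside constant_test_bit() must not be instrumented again by the compiler:

static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_bit(nr, addr); /* may resolve to constant_test_bit() */
}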
......
......@@ -28,6 +28,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y
......
......@@ -13,6 +13,9 @@ endif
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
# As above, instrumenting secondary CPU boot code causes boot hangs.
KCSAN_SANITIZE_common.o := n
# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o := $(nostackp)
......
......@@ -991,7 +991,15 @@ void __init e820__reserve_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+/*
+* SETUP_EFI is supplied by kexec and does not need to be
+* reserved.
+*/
+if (data->type != SETUP_EFI)
+e820__range_update_kexec(pa_data,
+sizeof(*data) + data->len,
+E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
if (data->type == SETUP_INDIRECT &&
((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
......
......@@ -6,10 +6,19 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
KCSAN_SANITIZE_delay.o := n
ifdef CONFIG_KCSAN
# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
endif
# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o := n
KCSAN_SANITIZE_cmdline.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
......
......@@ -7,6 +7,10 @@ KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
# Disable KCSAN entirely, because otherwise we get warnings that some functions
# reference __initdata sections.
KCSAN_SANITIZE := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
......
......@@ -14,10 +14,18 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
CFLAGS_sha256.o := -D__DISABLE_EXPORTS
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
-targets += purgatory.ro
+# When linking purgatory.ro with -r, unresolved symbols are not checked;
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# These are adjustments to the compiler flags used for objects that
......@@ -25,7 +33,7 @@ KCOV_INSTRUMENT := n
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
......@@ -58,12 +66,15 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
$(call if_changed,ld)
targets += kexec-purgatory.c
quiet_cmd_bin2c = BIN2C $@
cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
$(call if_changed,bin2c)
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
......@@ -6,7 +6,10 @@
# for more details.
#
#
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
subdir- := rm
......
......@@ -6,7 +6,10 @@
# for more details.
#
#
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
......
......@@ -37,7 +37,9 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
GCOV_PROFILE := n
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
......
......@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
......@@ -25,7 +25,7 @@
*/
static inline void set_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
......@@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
......@@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
......@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
......@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
......@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
......
......@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
......@@ -22,7 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
......@@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
......@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
......@@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
......
......@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* __set_bit - Set a bit in memory
......@@ -24,7 +24,7 @@
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
......@@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
......@@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
......@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
}
......@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
}
......@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
-kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
}
......@@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
-kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
......
......@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
......@@ -24,6 +24,15 @@
#define __no_sanitize_address
#endif
#if __has_feature(thread_sanitizer)
/* Emulate GCC's __SANITIZE_THREAD__ flag */
#define __SANITIZE_THREAD__
#define __no_sanitize_thread \
__attribute__((no_sanitize("thread")))
#else
#define __no_sanitize_thread
#endif
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
......
......@@ -144,6 +144,12 @@
#define __no_sanitize_address
#endif
#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
#define __no_sanitize_thread __attribute__((no_sanitize_thread))
#else
#define __no_sanitize_thread
#endif
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
......
......@@ -250,6 +250,27 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
/**
* data_race - mark an expression as containing intentional data races
*
* This data_race() macro is useful for situations in which data races
* should be forgiven. One example is diagnostic code that accesses
* shared variables but is not a part of the core synchronization design.
*
* This macro *does not* affect normal code generation, but is a hint
* to tooling that data races here are to be ignored.
*/
#define data_race(expr) \
({ \
__kcsan_disable_current(); \
({ \
__unqual_scalar_typeof(({ expr; })) __v = ({ expr; }); \
__kcsan_enable_current(); \
__v; \
}); \
})
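For illustration, a minimal usage sketch of data_race() (the counter and function here are hypothetical, not part of this commit): a deliberately unsynchronized diagnostic read that KCSAN will not report.

static unsigned long pages_scanned; /* hypothetical, updated concurrently */

static void report_progress(void)
{
	/* The racy read is intentional; this is diagnostic output only. */
	pr_info("pages scanned so far: %lu\n", data_race(pages_scanned));
}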
/*
* Use __READ_ONCE() instead of READ_ONCE() if you do not require any
......@@ -260,7 +281,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __READ_ONCE_SCALAR(x) \
({ \
-__unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
+typeof(x) *__xp = &(x); \
+__unqual_scalar_typeof(x) __x = data_race(__READ_ONCE(*__xp)); \
+kcsan_check_atomic_read(__xp, sizeof(*__xp)); \
smp_read_barrier_depends(); \
(typeof(x))__x; \
})
......@@ -276,10 +299,17 @@ do { \
*(volatile typeof(x) *)&(x) = (val); \
} while (0)
#define __WRITE_ONCE_SCALAR(x, val) \
do { \
typeof(x) *__xp = &(x); \
kcsan_check_atomic_write(__xp, sizeof(*__xp)); \
data_race(({ __WRITE_ONCE(*__xp, val); 0; })); \
} while (0)
#define WRITE_ONCE(x, val) \
do { \
compiletime_assert_rwonce_type(x); \
-__WRITE_ONCE(x, val); \
+__WRITE_ONCE_SCALAR(x, val); \
} while (0)
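For illustration, the intended pairing under KCSAN (names hypothetical, not part of this commit): both sides use marked accesses, so KCSAN treats them as atomic and stays silent, while the volatile access stops the compiler from tearing or caching the value.

static bool data_ready; /* hypothetical flag shared across contexts */

static void signal_ready(void) /* e.g. interrupt context */
{
	WRITE_ONCE(data_ready, true);
}

static bool poll_ready(void) /* e.g. task context */
{
	return READ_ONCE(data_ready);
}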
#ifdef CONFIG_KASAN
......@@ -290,11 +320,30 @@ do { \
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
# define __no_sanitize_or_inline __no_kasan_or_inline
#else
# define __no_kasan_or_inline __always_inline
#endif
-static __no_kasan_or_inline
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+/*
+* Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid suppressing
+* inlining in compilation units where instrumentation is disabled. The
+* attribute 'noinline' is required for older compilers, where implicit
+* inlining of very small functions renders __no_sanitize_thread ineffective.
+*/
+# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+static __no_sanitize_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
return __READ_ONCE(*(unsigned long *)addr);
......@@ -302,8 +351,8 @@ unsigned long __read_once_word_nocheck(const void *addr)
/*
* Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
-* word from memory atomically but without telling KASAN. This is usually
-* used by unwinding code when walking the stack of a running process.
+* word from memory atomically but without telling KASAN/KCSAN. This is
+* usually used by unwinding code when walking the stack of a running process.
*/
#define READ_ONCE_NOCHECK(x) \
({ \
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header provides generic wrappers for memory access instrumentation that
* the compiler cannot emit for: KASAN, KCSAN.
*/
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
#include <linux/types.h>
/**
* instrument_read - instrument regular read access
*
* Instrument a regular read access. The instrumentation should be inserted
* before the actual read happens.
*
* @v address of access
* @size size of access
*/
static __always_inline void instrument_read(const volatile void *v, size_t size)
{
kasan_check_read(v, size);
kcsan_check_read(v, size);
}
/**
* instrument_write - instrument regular write access
*
* Instrument a regular write access. The instrumentation should be inserted
* before the actual write happens.
*
* @v address of access
* @size size of access
*/
static __always_inline void instrument_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_write(v, size);
}
/**
* instrument_atomic_read - instrument atomic read access
*
* Instrument an atomic read access. The instrumentation should be inserted
* before the actual read happens.
*
* @v address of access
* @size size of access
*/
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
kasan_check_read(v, size);
kcsan_check_atomic_read(v, size);
}
/**
* instrument_atomic_write - instrument atomic write access
*
* Instrument an atomic write access. The instrumentation should be inserted
* before the actual write happens.
*
* @v address of access
* @size size of access
*/
static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
kasan_check_write(v, size);
kcsan_check_atomic_write(v, size);
}
/**
* instrument_copy_to_user - instrument reads of copy_to_user
*
* Instrument reads from kernel memory that are due to copy_to_user (and
* variants). The instrumentation must be inserted before the accesses.
*
* @to destination address
* @from source address
* @n number of bytes to copy
*/
static __always_inline void
instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_read(from, n);
}
/**
* instrument_copy_from_user - instrument writes of copy_from_user
*
* Instrument writes to kernel memory that are due to copy_from_user (and
* variants). The instrumentation should be inserted before the accesses.
*
* @to destination address
* @from source address
* @n number of bytes to copy
*/
static __always_inline void
instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
kcsan_check_write(to, n);
}
#endif /* _LINUX_INSTRUMENTED_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H
#include <linux/kcsan-checks.h>
#include <linux/types.h>
#ifdef CONFIG_KCSAN
/*
* Context for each thread of execution: for tasks, this is stored in
* task_struct, and interrupts access internal per-CPU storage.
*/
struct kcsan_ctx {
int disable_count; /* disable counter */
int atomic_next; /* number of following atomic ops */
/*
* We distinguish between: (a) nestable atomic regions that may contain
* other nestable regions; and (b) flat atomic regions that do not keep
* track of nesting. Both (a) and (b) are entirely independent of each
* other, and a flat region may be started in a nestable region or
* vice-versa.
*
* This is required because, for example, in the annotations for
* seqlocks, we declare seqlock writer critical sections as (a) nestable
* atomic regions, but reader critical sections as (b) flat atomic
* regions, yet we have encountered cases where seqlock reader critical
* sections are contained within writer critical sections (the opposite
* may be possible, too).
*
* To support these cases, we independently track the depth of nesting
* for (a), and whether the leaf level is flat for (b).
*/
int atomic_nest_count;
bool in_flat_atomic;
/*
* Access mask for all accesses if non-zero.
*/
unsigned long access_mask;
/* List of scoped accesses. */
struct list_head scoped_accesses;
};
/**
* kcsan_init - initialize KCSAN runtime
*/
void kcsan_init(void);
#else /* CONFIG_KCSAN */
static inline void kcsan_init(void) { }
#endif /* CONFIG_KCSAN */
#endif /* _LINUX_KCSAN_H */
......@@ -31,6 +31,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
......@@ -1197,6 +1198,9 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
#ifdef CONFIG_KCSAN
struct kcsan_ctx kcsan_ctx;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
......
......@@ -37,8 +37,24 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
* The seqlock interface does not prescribe a precise sequence of read
* begin/retry/end. For readers, typically there is a call to
* read_seqcount_begin() and read_seqcount_retry(), however, there are more
* esoteric cases which do not follow this pattern.
*
* As a consequence, we take the following best-effort approach for raw usage
* via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
* pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
* atomics; if there is a matching read_seqcount_retry() call, no following
* memory operations are considered atomic. Usage of seqlocks via the
* seqlock_t interface is not affected.
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
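For illustration, the raw seqcount_t reader shape this heuristic targets (names hypothetical, not part of this commit): the plain read after read_seqcount_begin() falls inside the pessimistically marked window of at most KCSAN_SEQLOCK_REGION_MAX accesses, and the matching read_seqcount_retry() closes it.

static seqcount_t stats_seq = SEQCNT_ZERO(stats_seq);
static u64 stats_value;

static u64 stats_read(void)
{
	unsigned seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&stats_seq);
		val = stats_value; /* plain read, treated as atomic here */
	} while (read_seqcount_retry(&stats_seq, seq));

	return val;
}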
/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
......@@ -115,6 +131,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
cpu_relax();
goto repeat;
}
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
......@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
......@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret & ~1;
}
......@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
-return unlikely(s->sequence != start);
+kcsan_atomic_next(0);
+return unlikely(READ_ONCE(s->sequence) != start);
}
/**
......@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
......@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
kcsan_nestable_atomic_end();
}
/**
......@@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
* Note that writes surrounding the barrier should be declared atomic (e.g.
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
* neither the writes before nor after the barrier are enclosed in a seq-writer
* critical section that would ensure readers are aware of ongoing writes.
*
* seqcount_t seq;
* bool X = true, Y = false;
*
......@@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
-* Y = true;
+* WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
-* X = false;
+* WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
kcsan_nestable_atomic_end();
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
......@@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
kcsan_nestable_atomic_begin();
s->sequence+=2;
kcsan_nestable_atomic_end();
}
typedef struct {
......@@ -430,11 +463,21 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
-return read_seqcount_begin(&sl->seqcount);
+unsigned ret = read_seqcount_begin(&sl->seqcount);
+kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+kcsan_flat_atomic_begin();
+return ret;
}
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
/*
* Assume not nested: read_seqretry() may be called multiple times when
* completing a read critical section.
*/
kcsan_flat_atomic_end();
return read_seqcount_retry(&sl->seqcount, start);
}
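For illustration, the seqlock_t reader these hooks bracket (data hypothetical, not part of this commit): everything between read_seqbegin() and read_seqretry() lies in a flat atomic region, so KCSAN does not flag the racy plain reads that the retry loop already tolerates.

static DEFINE_SEQLOCK(pos_lock);
static int pos_x, pos_y;

static void pos_read(int *x, int *y)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&pos_lock);
		*x = pos_x; /* inside the flat atomic region */
		*y = pos_y;
	} while (read_seqretry(&pos_lock, seq));
}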
......
......@@ -2,9 +2,9 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
......@@ -58,7 +58,7 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
-kasan_check_write(to, n);
+instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
......@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
-kasan_check_write(to, n);
+instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
......@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
-kasan_check_read(from, n);
+instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
......@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
-kasan_check_read(from, n);
+instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
......@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
-kasan_check_write(to, n);
+instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
......@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(to, n)) {
-kasan_check_read(from, n);
+instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
......
......@@ -174,6 +174,16 @@ struct task_struct init_task
#ifdef CONFIG_KASAN
.kasan_depth = 1,
#endif
#ifdef CONFIG_KCSAN
.kcsan_ctx = {
.disable_count = 0,
.atomic_next = 0,
.atomic_nest_count = 0,
.in_flat_atomic = false,
.access_mask = 0,
.scoped_accesses = {LIST_POISON1, NULL},
},
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
.softirqs_enabled = 1,
#endif
......
......@@ -95,6 +95,7 @@
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>
#include <linux/kcsan.h>
#include <asm/io.h>
#include <asm/bugs.h>
......@@ -1036,6 +1037,7 @@ asmlinkage __visible void __init start_kernel(void)
acpi_subsystem_init();
arch_post_acpi_subsys_init();
sfi_init_late();
kcsan_init();
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
......
......@@ -23,6 +23,9 @@ endif
# Prevents flicker of uninteresting __do_softirq()/__local_bh_disable_ip()
# in coverage traces.
KCOV_INSTRUMENT_softirq.o := n
# Avoid KCSAN instrumentation in softirq ("No shared variables, all the data
# are CPU local" => assume no data races), to reduce overhead in interrupts.
KCSAN_SANITIZE_softirq.o = n
# These are called from save_stack_trace() on slub debug path,
# and produce insane amounts of uninteresting coverage.
KCOV_INSTRUMENT_module.o := n
......@@ -31,6 +34,7 @@ KCOV_INSTRUMENT_stacktrace.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
KCSAN_SANITIZE_kcov.o := n
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
# cond_syscall is currently not LTO compatible
......@@ -103,6 +107,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
obj-$(CONFIG_KCSAN) += kcsan/
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_PERF_EVENTS) += events/
......@@ -121,6 +126,7 @@ obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
KASAN_SANITIZE_stackleak.o := n
KCSAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_stackleak.o := n
$(obj)/configs.o: $(obj)/config_data.gz
......
# SPDX-License-Identifier: GPL-2.0
KCSAN_SANITIZE := n
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
$(call cc-option,-fno-stack-protector,)
obj-y := core.o debugfs.o report.o
obj-$(CONFIG_KCSAN_SELFTEST) += test.o