Commit 5cbaefe9 authored by Ingo Molnar

kcsan: Improve various small stylistic details

Tidy up a few bits:

  - Fix typos and grammar, improve wording.

  - Remove spurious newlines that are col80 warning artifacts where the
    resulting line-break is worse than the disease it's curing.

  - Use core kernel coding style to improve readability and reduce
    spurious code pattern variations.

  - Use better vertical alignment for structure definitions and initialization
    sequences.

  - Misc other small details.

No change in functionality intended.

Cc: linux-kernel@vger.kernel.org
Cc: Marco Elver <elver@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8e1d58ae
@@ -16,7 +16,7 @@
 #define KASAN_ABI_VERSION 5
 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
 #define __SANITIZE_ADDRESS__
 #define __no_sanitize_address \
         __attribute__((no_sanitize("address", "hwaddress")))
...
@@ -313,7 +313,7 @@ unsigned long read_word_at_a_time(const void *addr)
 #include <linux/kcsan.h>
 /*
- * data_race: macro to document that accesses in an expression may conflict with
+ * data_race(): macro to document that accesses in an expression may conflict with
  * other concurrent accesses resulting in data races, but the resulting
  * behaviour is deemed safe regardless.
  *
...
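For orientation, a minimal sketch of how data_race() is typically used (illustrative only, not part of this diff; the counter and helper below are hypothetical):

#include <linux/compiler.h>

/* Hypothetical example: a statistics counter whose races are benign. */
static unsigned long hit_count;	/* updated concurrently without locking */

static unsigned long read_hit_count(void)
{
        /*
         * The plain load may race with concurrent writers, but a stale or
         * torn value is acceptable here; data_race() documents that and
         * keeps KCSAN from reporting it.
         */
        return data_race(hit_count);
}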
@@ -12,13 +12,13 @@
 #define KCSAN_ACCESS_ATOMIC 0x2
 /*
- * __kcsan_*: Always calls into runtime when KCSAN is enabled. This may be used
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
  * even in compilation units that selectively disable KCSAN, but must use KCSAN
  * to validate access to an address. Never use these in header files!
  */
 #ifdef CONFIG_KCSAN
 /**
- * __kcsan_check_access - check generic access for data race
+ * __kcsan_check_access - check generic access for data races
  *
  * @ptr address of access
  * @size size of access
@@ -32,7 +32,7 @@ static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
 #endif
 /*
- * kcsan_*: Only calls into runtime when the particular compilation unit has
+ * kcsan_*: Only calls into the runtime when the particular compilation unit has
  * KCSAN instrumentation enabled. May be used in header files.
  */
 #ifdef __SANITIZE_THREAD__
@@ -77,16 +77,12 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
         kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
 /*
- * Check for atomic accesses: if atomic access are not ignored, this simply
- * aliases to kcsan_check_access, otherwise becomes a no-op.
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
  */
 #ifdef CONFIG_KCSAN_IGNORE_ATOMICS
-#define kcsan_check_atomic_read(...) \
-        do { \
-        } while (0)
-#define kcsan_check_atomic_write(...) \
-        do { \
-        } while (0)
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
 #else
 #define kcsan_check_atomic_read(ptr, size) \
         kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
...
@@ -95,19 +95,12 @@ void kcsan_atomic_next(int n);
 #else /* CONFIG_KCSAN */
 static inline void kcsan_init(void) { }
 static inline void kcsan_disable_current(void) { }
 static inline void kcsan_enable_current(void) { }
 static inline void kcsan_nestable_atomic_begin(void) { }
 static inline void kcsan_nestable_atomic_end(void) { }
 static inline void kcsan_flat_atomic_begin(void) { }
 static inline void kcsan_flat_atomic_end(void) { }
 static inline void kcsan_atomic_next(int n) { }
 #endif /* CONFIG_KCSAN */
...
@@ -48,7 +48,7 @@
  *
  * As a consequence, we take the following best-effort approach for raw usage
  * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
- * pessimistically mark then next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
  * atomics; if there is a matching read_seqcount_retry() call, no following
  * memory operations are considered atomic. Usage of seqlocks via seqlock_t
  * interface is not affected.
@@ -265,7 +265,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
  * usual consistency guarantee. It is one wmb cheaper, because we can
  * collapse the two back-to-back wmb()s.
  *
- * Note that, writes surrounding the barrier should be declared atomic (e.g.
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
  * via WRITE_ONCE): a) to ensure the writes become visible to other threads
  * atomically, avoiding compiler optimizations; b) to document which writes are
  * meant to propagate to the reader critical section. This is necessary because
@@ -465,7 +465,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
         unsigned ret = read_seqcount_begin(&sl->seqcount);
-        kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry */
+        kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
         kcsan_flat_atomic_begin();
         return ret;
 }
@@ -473,7 +473,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
         /*
-         * Assume not nested: read_seqretry may be called multiple times when
+         * Assume not nested: read_seqretry() may be called multiple times when
          * completing read critical section.
          */
         kcsan_flat_atomic_end();
...
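For orientation, the seqlock_t read-side pattern that these KCSAN annotations bracket looks roughly like this (illustrative sketch, not part of this diff; the data structure and helper are hypothetical):

#include <linux/seqlock.h>
#include <linux/types.h>

/* Hypothetical seqlock reader illustrating read_seqbegin()/read_seqretry(). */
static DEFINE_SEQLOCK(stats_lock);
static struct { u64 packets, bytes; } stats;

static void get_stats(u64 *packets, u64 *bytes)
{
        unsigned seq;

        do {
                seq = read_seqbegin(&stats_lock);  /* KCSAN: following reads treated as atomic */
                *packets = stats.packets;
                *bytes   = stats.bytes;
        } while (read_seqretry(&stats_lock, seq)); /* KCSAN: ends the assumed-atomic region */
}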
@@ -6,7 +6,7 @@
 #include <linux/jiffies.h>
 /*
- * Helper that returns true if access to ptr should be considered as an atomic
+ * Helper that returns true if access to @ptr should be considered an atomic
  * access, even though it is not explicitly atomic.
  *
  * List all volatile globals that have been observed in races, to suppress
...
@@ -50,11 +50,11 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
  * slot=9: [10, 11, 9]
  * slot=63: [64, 65, 63]
  */
-#define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT)
+#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
 #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
 /*
- * SLOT_IDX_FAST is used in fast-path. Not first checking the address's primary
+ * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
  * slot (middle) is fine if we assume that data races occur rarely. The set of
  * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
  * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
@@ -68,9 +68,9 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
  * zero-initialized state matches INVALID_WATCHPOINT.
  *
  * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
- * use more complicated SLOT_IDX_FAST calculation with modulo in fast-path.
+ * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
  */
-static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];
+static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
 /*
  * Instructions to skip watching counter, used in should_watch(). We use a
@@ -78,7 +78,8 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
  */
 static DEFINE_PER_CPU(long, kcsan_skip);
-static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
+static inline atomic_long_t *find_watchpoint(unsigned long addr,
+                                             size_t size,
                                              bool expect_write,
                                              long *encoded_watchpoint)
 {
@@ -110,8 +111,8 @@ static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
         return NULL;
 }
-static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
-                                               bool is_write)
+static inline atomic_long_t *
+insert_watchpoint(unsigned long addr, size_t size, bool is_write)
 {
         const int slot = watchpoint_slot(addr);
         const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
@@ -120,21 +121,16 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
         /* Check slot index logic, ensuring we stay within array bounds. */
         BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
-        BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT + 1) != 0);
-        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
-                              KCSAN_CHECK_ADJACENT) !=
-                     ARRAY_SIZE(watchpoints) - 1);
-        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
-                              KCSAN_CHECK_ADJACENT + 1) !=
-                     ARRAY_SIZE(watchpoints) - NUM_SLOTS);
+        BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
+        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
+        BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
         for (i = 0; i < NUM_SLOTS; ++i) {
                 long expect_val = INVALID_WATCHPOINT;
                 /* Try to acquire this slot. */
                 watchpoint = &watchpoints[SLOT_IDX(slot, i)];
-                if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val,
-                                                    encoded_watchpoint))
+                if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
                         return watchpoint;
         }
@@ -150,11 +146,10 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
  * 2. the thread that set up the watchpoint already removed it;
  * 3. the watchpoint was removed and then re-used.
  */
-static inline bool try_consume_watchpoint(atomic_long_t *watchpoint,
-                                          long encoded_watchpoint)
+static inline bool
+try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
 {
-        return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint,
-                                               CONSUMED_WATCHPOINT);
+        return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
 }
 /*
@@ -162,14 +157,13 @@ static inline bool try_consume_watchpoint(atomic_long_t *watchpoint,
  */
 static inline bool remove_watchpoint(atomic_long_t *watchpoint)
 {
-        return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) !=
-               CONSUMED_WATCHPOINT;
+        return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
 }
 static inline struct kcsan_ctx *get_ctx(void)
 {
         /*
-         * In interrupt, use raw_cpu_ptr to avoid unnecessary checks, that would
+         * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
          * also result in calls that generate warnings in uaccess regions.
         */
         return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
@@ -260,7 +254,8 @@ static inline unsigned int get_delay(void)
  */
 static noinline void kcsan_found_watchpoint(const volatile void *ptr,
-                                            size_t size, bool is_write,
+                                            size_t size,
+                                            bool is_write,
                                             atomic_long_t *watchpoint,
                                             long encoded_watchpoint)
 {
@@ -296,8 +291,8 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
         user_access_restore(flags);
 }
-static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
-                                            size_t size, bool is_write)
+static noinline void
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write)
 {
         atomic_long_t *watchpoint;
         union {
@@ -346,8 +341,8 @@ static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
         watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
         if (watchpoint == NULL) {
                 /*
-                 * Out of capacity: the size of `watchpoints`, and the frequency
-                 * with which `should_watch()` returns true should be tweaked so
+                 * Out of capacity: the size of 'watchpoints', and the frequency
+                 * with which should_watch() returns true should be tweaked so
                  * that this case happens very rarely.
                  */
                 kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
...
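As an aside, the slot ordering documented above ("slot=9: [10, 11, 9]") can be reproduced with a tiny standalone program (illustrative only; it assumes KCSAN_CHECK_ADJACENT is 1, as in the default configuration):

/* Standalone illustration of the SLOT_IDX() ordering; builds with a plain C compiler. */
#include <stdio.h>

#define KCSAN_CHECK_ADJACENT 1	/* assumption for illustration */
#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

int main(void)
{
        int slot = 9;
        int i;

        /* Prints "10 11 9": the adjacent slots are probed first, the primary slot last. */
        for (i = 0; i < NUM_SLOTS; ++i)
                printf("%d ", SLOT_IDX(slot, i));
        printf("\n");
        return 0;
}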
@@ -41,22 +41,14 @@ static DEFINE_SPINLOCK(report_filterlist_lock);
 static const char *counter_to_name(enum kcsan_counter_id id)
 {
         switch (id) {
-        case KCSAN_COUNTER_USED_WATCHPOINTS:
-                return "used_watchpoints";
-        case KCSAN_COUNTER_SETUP_WATCHPOINTS:
-                return "setup_watchpoints";
-        case KCSAN_COUNTER_DATA_RACES:
-                return "data_races";
-        case KCSAN_COUNTER_NO_CAPACITY:
-                return "no_capacity";
-        case KCSAN_COUNTER_REPORT_RACES:
-                return "report_races";
-        case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN:
-                return "races_unknown_origin";
-        case KCSAN_COUNTER_UNENCODABLE_ACCESSES:
-                return "unencodable_accesses";
-        case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES:
-                return "encoding_false_positives";
+        case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints";
+        case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints";
+        case KCSAN_COUNTER_DATA_RACES: return "data_races";
+        case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
+        case KCSAN_COUNTER_REPORT_RACES: return "report_races";
+        case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin";
+        case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses";
+        case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
         case KCSAN_COUNTER_COUNT:
                 BUG();
         }
@@ -116,7 +108,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr)
         if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
                 return false;
-        func_addr -= offset; /* get function start */
+        func_addr -= offset; /* Get function start */
         spin_lock_irqsave(&report_filterlist_lock, flags);
         if (report_filterlist.used == 0)
@@ -195,6 +187,7 @@ static ssize_t insert_report_filterlist(const char *func)
 out:
         spin_unlock_irqrestore(&report_filterlist_lock, flags);
         return ret;
 }
@@ -226,8 +219,8 @@ static int debugfs_open(struct inode *inode, struct file *file)
         return single_open(file, show_info, NULL);
 }
-static ssize_t debugfs_write(struct file *file, const char __user *buf,
-                             size_t count, loff_t *off)
+static ssize_t
+debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
 {
         char kbuf[KSYM_NAME_LEN];
         char *arg;
@@ -264,10 +257,13 @@ static ssize_t debugfs_write(struct file *file, const char __user *buf,
         return count;
 }
-static const struct file_operations debugfs_ops = { .read = seq_read,
+static const struct file_operations debugfs_ops =
+{
+        .read = seq_read,
         .open = debugfs_open,
         .write = debugfs_write,
-        .release = single_release };
+        .release = single_release
+};
 void __init kcsan_debugfs_init(void)
 {
...
@@ -10,6 +10,7 @@
 #include "kcsan.h"
 #define SLOT_RANGE PAGE_SIZE
 #define INVALID_WATCHPOINT 0
 #define CONSUMED_WATCHPOINT 1
@@ -34,24 +35,24 @@
  * Both these are assumed to be very unlikely. However, in case it still happens
  * happens, the report logic will filter out the false positive (see report.c).
  */
-#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG - 1 - WATCHPOINT_SIZE_BITS)
+#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
 /*
  * Masks to set/retrieve the encoded data.
  */
-#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG - 1)
+#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
 #define WATCHPOINT_SIZE_MASK \
-        GENMASK(BITS_PER_LONG - 2, BITS_PER_LONG - 2 - WATCHPOINT_SIZE_BITS)
+        GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS)
 #define WATCHPOINT_ADDR_MASK \
-        GENMASK(BITS_PER_LONG - 3 - WATCHPOINT_SIZE_BITS, 0)
+        GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0)
 static inline bool check_encodable(unsigned long addr, size_t size)
 {
         return size <= MAX_ENCODABLE_SIZE;
 }
-static inline long encode_watchpoint(unsigned long addr, size_t size,
-                                     bool is_write)
+static inline long
+encode_watchpoint(unsigned long addr, size_t size, bool is_write)
 {
         return (long)((is_write ? WATCHPOINT_WRITE_MASK : 0) |
                       (size << WATCHPOINT_ADDR_BITS) |
@@ -59,7 +60,8 @@ static inline long encode_watchpoint(unsigned long addr, size_t size,
 }
 static inline bool decode_watchpoint(long watchpoint,
-                                     unsigned long *addr_masked, size_t *size,
+                                     unsigned long *addr_masked,
+                                     size_t *size,
                                      bool *is_write)
 {
         if (watchpoint == INVALID_WATCHPOINT ||
@@ -67,8 +69,7 @@ static inline bool decode_watchpoint(long watchpoint,
                 return false;
         *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK;
-        *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >>
-                WATCHPOINT_ADDR_BITS;
+        *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> WATCHPOINT_ADDR_BITS;
         *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK);
         return true;
...
@@ -72,14 +72,14 @@ enum kcsan_counter_id {
 /*
  * Increment/decrement counter with given id; avoid calling these in fast-path.
  */
-void kcsan_counter_inc(enum kcsan_counter_id id);
-void kcsan_counter_dec(enum kcsan_counter_id id);
+extern void kcsan_counter_inc(enum kcsan_counter_id id);
+extern void kcsan_counter_dec(enum kcsan_counter_id id);
 /*
  * Returns true if data races in the function symbol that maps to func_addr
  * (offsets are ignored) should *not* be reported.
  */
-bool kcsan_skip_report_debugfs(unsigned long func_addr);
+extern bool kcsan_skip_report_debugfs(unsigned long func_addr);
 enum kcsan_report_type {
         /*
@@ -99,10 +99,11 @@ enum kcsan_report_type {
          */
         KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
 };
 /*
  * Print a race report from thread that encountered the race.
  */
-void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
-                  bool value_change, int cpu_id, enum kcsan_report_type type);
+extern void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
+                         bool value_change, int cpu_id, enum kcsan_report_type type);
 #endif /* _KERNEL_KCSAN_KCSAN_H */
@@ -40,8 +40,8 @@ static DEFINE_SPINLOCK(report_lock);
 /*
  * Special rules to skip reporting.
  */
-static bool skip_report(bool is_write, bool value_change,
-                        unsigned long top_frame)
+static bool
+skip_report(bool is_write, bool value_change, unsigned long top_frame)
 {
         if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) && is_write &&
             !value_change) {
@@ -105,6 +105,7 @@ static int sym_strcmp(void *addr1, void *addr2)
         snprintf(buf1, sizeof(buf1), "%pS", addr1);
         snprintf(buf2, sizeof(buf2), "%pS", addr2);
         return strncmp(buf1, buf2, sizeof(buf1));
 }
@@ -116,8 +117,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
                          enum kcsan_report_type type)
 {
         unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
-        int num_stack_entries =
-                stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
+        int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
         int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
         int other_skipnr;
@@ -131,7 +131,7 @@ static bool print_report(const volatile void *ptr, size_t size, bool is_write,
                 other_skipnr = get_stack_skipnr(other_info.stack_entries,
                                                 other_info.num_stack_entries);
-                /* value_change is only known for the other thread */
+                /* @value_change is only known for the other thread */
                 if (skip_report(other_info.is_write, value_change,
                                 other_info.stack_entries[other_skipnr]))
                         return false;
@@ -246,8 +246,7 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
                 other_info.is_write = is_write;
                 other_info.task_pid = in_task() ? task_pid_nr(current) : -1;
                 other_info.cpu_id = cpu_id;
-                other_info.num_stack_entries = stack_trace_save(
-                        other_info.stack_entries, NUM_STACK_ENTRIES, 1);
+                other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1);
                 spin_unlock_irqrestore(&report_lock, *flags);
@@ -299,6 +298,7 @@ static bool prepare_report(unsigned long *flags, const volatile void *ptr,
         }
         spin_unlock_irqrestore(&report_lock, *flags);
         goto retry;
 }
@@ -309,9 +309,7 @@ void kcsan_report(const volatile void *ptr, size_t size, bool is_write,
         kcsan_disable_current();
         if (prepare_report(&flags, ptr, size, is_write, cpu_id, type)) {
-                if (print_report(ptr, size, is_write, value_change, cpu_id,
-                                 type) &&
-                    panic_on_warn)
+                if (print_report(ptr, size, is_write, value_change, cpu_id, type) && panic_on_warn)
                         panic("panic_on_warn set ...\n");
                 release_report(&flags, type);
...
@@ -34,7 +34,7 @@ static bool test_encode_decode(void)
                 if (WARN_ON(!check_encodable(addr, size)))
                         return false;
-                /* encode and decode */
+                /* Encode and decode */
                 {
                         const long encoded_watchpoint =
                                 encode_watchpoint(addr, size, is_write);
@@ -42,7 +42,7 @@ static bool test_encode_decode(void)
                         size_t verif_size;
                         bool verif_is_write;
-                        /* check special watchpoints */
+                        /* Check special watchpoints */
                         if (WARN_ON(decode_watchpoint(
                                     INVALID_WATCHPOINT, &verif_masked_addr,
                                     &verif_size, &verif_is_write)))
@@ -52,7 +52,7 @@ static bool test_encode_decode(void)
                                     &verif_size, &verif_is_write)))
                                 return false;
-                        /* check decoding watchpoint returns same data */
+                        /* Check decoding watchpoint returns same data */
                         if (WARN_ON(!decode_watchpoint(
                                     encoded_watchpoint, &verif_masked_addr,
                                     &verif_size, &verif_is_write)))
...
@@ -7,7 +7,7 @@ endif
 # that is not a function of syscall inputs. E.g. involuntary context switches.
 KCOV_INSTRUMENT := n
-# There are numerous races here, however, most of them due to plain accesses.
+# There are numerous data races here, however, most of them are due to plain accesses.
 # This would make it even harder for syzbot to find reproducers, because these
 # bugs trigger without specific input. Disable by default, but should re-enable
 # eventually.
...
@@ -6,7 +6,6 @@ config HAVE_ARCH_KCSAN
 menuconfig KCSAN
         bool "KCSAN: watchpoint-based dynamic data race detector"
         depends on HAVE_ARCH_KCSAN && !KASAN && STACKTRACE
-        default n
         help
           Kernel Concurrency Sanitizer is a dynamic data race detector, which
           uses a watchpoint-based sampling approach to detect races. See
@@ -16,13 +15,12 @@ if KCSAN
 config KCSAN_DEBUG
         bool "Debugging of KCSAN internals"
-        default n
 config KCSAN_SELFTEST
         bool "Perform short selftests on boot"
         default y
         help
-          Run KCSAN selftests on boot. On test failure, causes kernel to panic.
+          Run KCSAN selftests on boot. On test failure, causes the kernel to panic.
 config KCSAN_EARLY_ENABLE
         bool "Early enable during boot"
@@ -62,7 +60,8 @@ config KCSAN_DELAY_RANDOMIZE
         default y
         help
           If delays should be randomized, where the maximum is KCSAN_UDELAY_*.
-          If false, the chosen delays are always KCSAN_UDELAY_* defined above.
+          If false, the chosen delays are always the KCSAN_UDELAY_* values
+          as defined above.
 config KCSAN_SKIP_WATCH
         int "Skip instructions before setting up watchpoint"
@@ -86,9 +85,9 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
 # parameters, to optimize for the common use-case, we avoid this because: (a)
 # it would impact performance (and we want to avoid static branch for all
 # {READ,WRITE}_ONCE, atomic_*, bitops, etc.), and (b) complicate the design
-# without real benefit. The main purpose of the below options are for use in
-# fuzzer configs to control reported data races, and are not expected to be
-# switched frequently by a user.
+# without real benefit. The main purpose of the below options is for use in
+# fuzzer configs to control reported data races, and they are not expected
+# to be switched frequently by a user.
 config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
         bool "Report races of unknown origin"
@@ -103,13 +102,12 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY
         bool "Only report races where watcher observed a data value change"
         default y
         help
-          If enabled and a conflicting write is observed via watchpoint, but
+          If enabled and a conflicting write is observed via a watchpoint, but
           the data value of the memory location was observed to remain
           unchanged, do not report the data race.
 config KCSAN_IGNORE_ATOMICS
         bool "Do not instrument marked atomic accesses"
-        default n
         help
           If enabled, never instruments marked atomic accesses. This results in
           not reporting data races where one access is atomic and the other is
...