Commit ca2ef2d9 authored by Linus Torvalds

Merge tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull KCSAN updates from Paul McKenney:
 "This contains initialization fixups, testing improvements, addition of
  instruction pointer to data-race reports, and scoped data-race checks"

* tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  kcsan: selftest: Cleanup and add missing __init
  kcsan: Move ctx to start of argument list
  kcsan: Support reporting scoped read-write access type
  kcsan: Start stack trace with explicit location if provided
  kcsan: Save instruction pointer for scoped accesses
  kcsan: Add ability to pass instruction pointer of access to reporting
  kcsan: test: Fix flaky test case
  kcsan: test: Use kunit_skip() to skip tests
  kcsan: test: Defer kcsan_test_init() after kunit initialization
parents 5593a733 ac20e39e
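
For context, the "scoped data-race checks" mentioned above are the ASSERT_EXCLUSIVE_*_SCOPED() helpers from <linux/kcsan-checks.h>, which keep an assertion live until the end of the enclosing scope. A minimal illustrative sketch follows; the lock and variable names are hypothetical and not part of this merge:

#include <linux/kcsan-checks.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(update_foo_lock);        /* hypothetical lock */
static int shared_foo;                          /* hypothetical shared variable */

static void update_foo(int v)
{
        spin_lock(&update_foo_lock);
        /* Assert there are no concurrent writers until the end of this scope. */
        ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
        WRITE_ONCE(shared_foo, v);
        spin_unlock(&update_foo_lock);
}

With the instruction-pointer plumbing added below, a report triggered by such a scoped assertion can start its stack trace at the line where the assertion was set up, rather than at whichever instruction in the function happened to be executing.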
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
 /* Scoped access information. */
 struct kcsan_scoped_access {
         struct list_head list;
+        /* Access information. */
         const volatile void *ptr;
         size_t size;
         int type;
+        /* Location where scoped access was set up. */
+        unsigned long ip;
 };
 
 /*
  * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
......
This diff is collapsed.
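
The collapsed diff above is presumably the KCSAN core, where the new ip field from the header hunk would be filled in when a scoped access is registered. A minimal sketch of the idea, assuming the setup path records the caller's return address; the helper below is hypothetical and not the actual collapsed code:

#include <linux/kcsan-checks.h>
#include <linux/kernel.h>       /* _RET_IP_ */

/* Hypothetical illustration of how the new field could be populated. */
static void example_init_scoped_access(struct kcsan_scoped_access *sa,
                                       const volatile void *ptr, size_t size,
                                       int type)
{
        sa->ptr  = ptr;
        sa->size = size;
        sa->type = type;
        /* Assumed: remember where the scoped access was set up. */
        sa->ip   = _RET_IP_;
}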
@@ -121,7 +121,7 @@ enum kcsan_value_change {
  * to be consumed by the reporting thread. No report is printed yet.
  */
 void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
-                           int watchpoint_idx);
+                           unsigned long ip, int watchpoint_idx);
 
 /*
  * The calling thread observed that the watchpoint it set up was hit and
@@ -129,14 +129,14 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ
  * thread.
  */
 void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
-                               enum kcsan_value_change value_change, int watchpoint_idx,
-                               u64 old, u64 new, u64 mask);
+                               unsigned long ip, enum kcsan_value_change value_change,
+                               int watchpoint_idx, u64 old, u64 new, u64 mask);
 
 /*
  * No other thread was observed to race with the access, but the data value
  * before and after the stall differs. Reports a race of "unknown origin".
  */
 void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
-                                 u64 old, u64 new, u64 mask);
+                                 unsigned long ip, u64 old, u64 new, u64 mask);
 
 #endif /* _KERNEL_KCSAN_KCSAN_H */
@@ -29,6 +29,11 @@
 #include <linux/types.h>
 #include <trace/events/printk.h>
 
+#define KCSAN_TEST_REQUIRES(test, cond) do {                    \
+        if (!(cond))                                            \
+                kunit_skip((test), "Test requires: " #cond);    \
+} while (0)
+
 #ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
 #define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
 #else
@@ -205,10 +210,12 @@ static bool report_matches(const struct expect_report *r)
                                           "read-write" :
                                           "write") :
                                     "read");
+               const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
+               const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
                const char *const access_type_aux =
-                       (ty & KCSAN_ACCESS_ATOMIC) ?
-                               " (marked)" :
-                               ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");
+                               (is_atomic && is_scoped) ? " (marked, scoped)"
+                               : (is_atomic ? " (marked)"
+                                  : (is_scoped ? " (scoped)" : ""));
 
                if (i == 1) {
                        /* Access 2 */
@@ -333,7 +340,10 @@ static noinline void test_kernel_assert_bits_nochange(void)
        ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
 }
 
-/* To check that scoped assertions do trigger anywhere in scope. */
+/*
+ * Scoped assertions do trigger anywhere in scope. However, the report should
+ * still only point at the start of the scope.
+ */
 static noinline void test_enter_scope(void)
 {
        int x = 0;
@@ -488,17 +498,24 @@ static void test_concurrent_races(struct kunit *test)
 __no_kcsan
 static void test_novalue_change(struct kunit *test)
 {
-       const struct expect_report expect = {
+       const struct expect_report expect_rw = {
                .access = {
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
+       const struct expect_report expect_ww = {
+               .access = {
+                       { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+                       { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+               },
+       };
        bool match_expect = false;
 
+       test_kernel_write_nochange(); /* Reset value. */
        begin_test_checks(test_kernel_write_nochange, test_kernel_read);
        do {
-               match_expect = report_matches(&expect);
+               match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
        } while (!end_test_checks(match_expect));
        if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
                KUNIT_EXPECT_FALSE(test, match_expect);
@@ -513,17 +530,24 @@ static void test_novalue_change(struct kunit *test)
 __no_kcsan
 static void test_novalue_change_exception(struct kunit *test)
 {
-       const struct expect_report expect = {
+       const struct expect_report expect_rw = {
                .access = {
                        { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
+       const struct expect_report expect_ww = {
+               .access = {
+                       { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+                       { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
+               },
+       };
        bool match_expect = false;
 
+       test_kernel_write_nochange_rcu(); /* Reset value. */
        begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
        do {
-               match_expect = report_matches(&expect);
+               match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
 }
@@ -642,8 +666,7 @@ static void test_read_plain_atomic_write(struct kunit *test)
        };
        bool match_expect = false;
 
-       if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
-               return;
+       KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
 
        begin_test_checks(test_kernel_read, test_kernel_write_atomic);
        do {
@@ -665,8 +688,7 @@ static void test_read_plain_atomic_rmw(struct kunit *test)
        };
        bool match_expect = false;
 
-       if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
-               return;
+       KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
 
        begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
        do {
@@ -828,22 +850,22 @@ static void test_assert_exclusive_writer_scoped(struct kunit *test)
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
-       const struct expect_report expect_anywhere = {
+       const struct expect_report expect_inscope = {
                .access = {
                        { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect_start = false;
-       bool match_expect_anywhere = false;
+       bool match_expect_inscope = false;
 
        begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
        do {
                match_expect_start |= report_matches(&expect_start);
-               match_expect_anywhere |= report_matches(&expect_anywhere);
-       } while (!end_test_checks(match_expect_start && match_expect_anywhere));
+               match_expect_inscope |= report_matches(&expect_inscope);
+       } while (!end_test_checks(match_expect_inscope));
        KUNIT_EXPECT_TRUE(test, match_expect_start);
-       KUNIT_EXPECT_TRUE(test, match_expect_anywhere);
+       KUNIT_EXPECT_FALSE(test, match_expect_inscope);
 }
 
 __no_kcsan
@@ -872,9 +894,9 @@ static void test_assert_exclusive_access_scoped(struct kunit *test)
        do {
                match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
                match_expect_inscope |= report_matches(&expect_inscope);
-       } while (!end_test_checks(match_expect_start && match_expect_inscope));
+       } while (!end_test_checks(match_expect_inscope));
        KUNIT_EXPECT_TRUE(test, match_expect_start);
-       KUNIT_EXPECT_TRUE(test, match_expect_inscope);
+       KUNIT_EXPECT_FALSE(test, match_expect_inscope);
 }
 
 /*
@@ -1224,7 +1246,7 @@ static void kcsan_test_exit(void)
        tracepoint_synchronize_unregister();
 }
 
-late_initcall(kcsan_test_init);
+late_initcall_sync(kcsan_test_init);
 module_exit(kcsan_test_exit);
 
 MODULE_LICENSE("GPL v2");
......
@@ -8,6 +8,7 @@
 #include <linux/debug_locks.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
+#include <linux/kallsyms.h>
 #include <linux/kernel.h>
 #include <linux/lockdep.h>
 #include <linux/preempt.h>
@@ -31,6 +32,7 @@ struct access_info {
        int access_type;
        int task_pid;
        int cpu_id;
+       unsigned long ip;
 };
 
 /*
@@ -245,6 +247,10 @@ static const char *get_access_type(int type)
                return "write (scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "write (marked, scoped)";
+       case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
+               return "read-write (scoped)";
+       case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+               return "read-write (marked, scoped)";
        default:
                BUG();
        }
@@ -300,6 +306,48 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
        return skip;
 }
 
+/*
+ * Skips to the first entry that matches the function of @ip, and then replaces
+ * that entry with @ip, returning the entries to skip.
+ */
+static int
+replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip)
+{
+       unsigned long symbolsize, offset;
+       unsigned long target_func;
+       int skip;
+
+       if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset))
+               target_func = ip - offset;
+       else
+               goto fallback;
+
+       for (skip = 0; skip < num_entries; ++skip) {
+               unsigned long func = stack_entries[skip];
+
+               if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset))
+                       goto fallback;
+               func -= offset;
+
+               if (func == target_func) {
+                       stack_entries[skip] = ip;
+                       return skip;
+               }
+       }
+
+fallback:
+       /* Should not happen; the resulting stack trace is likely misleading. */
+       WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip);
+       return get_stack_skipnr(stack_entries, num_entries);
+}
+
+static int
+sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip)
+{
+       return ip ? replace_stack_entry(stack_entries, num_entries, ip) :
+                   get_stack_skipnr(stack_entries, num_entries);
+}
+
 /* Compares symbolized strings of addr1 and addr2. */
 static int sym_strcmp(void *addr1, void *addr2)
 {
@@ -327,12 +375,12 @@ static void print_verbose_info(struct task_struct *task)
 
 static void print_report(enum kcsan_value_change value_change,
                         const struct access_info *ai,
-                        const struct other_info *other_info,
+                        struct other_info *other_info,
                         u64 old, u64 new, u64 mask)
 {
        unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
        int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
-       int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
+       int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip);
        unsigned long this_frame = stack_entries[skipnr];
        unsigned long other_frame = 0;
        int other_skipnr = 0; /* silence uninit warnings */
@@ -344,8 +392,9 @@ static void print_report(enum kcsan_value_change value_change,
                return;
 
        if (other_info) {
-               other_skipnr = get_stack_skipnr(other_info->stack_entries,
-                                               other_info->num_stack_entries);
+               other_skipnr = sanitize_stack_entries(other_info->stack_entries,
+                                                     other_info->num_stack_entries,
+                                                     other_info->ai.ip);
                other_frame = other_info->stack_entries[other_skipnr];
 
                /* @value_change is only known for the other thread */
@@ -576,21 +625,23 @@ static bool prepare_report_consumer(unsigned long *flags,
 }
 
 static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
-                                             int access_type)
+                                             int access_type, unsigned long ip)
 {
        return (struct access_info) {
                .ptr            = ptr,
                .size           = size,
                .access_type    = access_type,
                .task_pid       = in_task() ? task_pid_nr(current) : -1,
-               .cpu_id         = raw_smp_processor_id()
+               .cpu_id         = raw_smp_processor_id(),
+               /* Only replace stack entry with @ip if scoped access. */
+               .ip             = (access_type & KCSAN_ACCESS_SCOPED) ? ip : 0,
        };
 }
 
 void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
-                          int watchpoint_idx)
+                          unsigned long ip, int watchpoint_idx)
 {
-       const struct access_info ai = prepare_access_info(ptr, size, access_type);
+       const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
        unsigned long flags;
 
        kcsan_disable_current();
@@ -603,10 +654,10 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ
 }
 
 void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
-                              enum kcsan_value_change value_change, int watchpoint_idx,
-                              u64 old, u64 new, u64 mask)
+                              unsigned long ip, enum kcsan_value_change value_change,
+                              int watchpoint_idx, u64 old, u64 new, u64 mask)
 {
-       const struct access_info ai = prepare_access_info(ptr, size, access_type);
+       const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
        struct other_info *other_info = &other_infos[watchpoint_idx];
        unsigned long flags = 0;
 
@@ -637,9 +688,9 @@ void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access
 }
 
 void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
-                                u64 old, u64 new, u64 mask)
+                                unsigned long ip, u64 old, u64 new, u64 mask)
 {
-       const struct access_info ai = prepare_access_info(ptr, size, access_type);
+       const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
        unsigned long flags;
 
        kcsan_disable_current();
......
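
The frame replacement above can be illustrated outside the kernel: given a raw stack trace and the instruction pointer recorded for a scoped access, the report starts at the first frame that falls inside the same function, rewritten to the exact setup location. A self-contained toy version in plain C, with the kallsyms lookup replaced by an assumed fixed function size:

#include <stdio.h>

#define FUNC_SIZE 0x100UL       /* assumption: every fake "function" spans 0x100 bytes */

/* Stand-in for kallsyms_lookup_size_offset(): map an address to its function base. */
static unsigned long func_base(unsigned long addr)
{
        return addr & ~(FUNC_SIZE - 1);
}

/* Find the first entry inside ip's function, overwrite it with ip, return its index. */
static int replace_frame(unsigned long stack[], int n, unsigned long ip)
{
        int i;

        for (i = 0; i < n; i++) {
                if (func_base(stack[i]) == func_base(ip)) {
                        stack[i] = ip;
                        return i;
                }
        }
        return 0;       /* fallback: report from the top of the trace */
}

int main(void)
{
        /* Fake trace: two reporting-internal frames, then the frame that set up the access. */
        unsigned long stack[] = { 0x1010, 0x1120, 0x2230, 0x3340 };
        unsigned long setup_ip = 0x2254;        /* exact ip recorded at setup time */
        int skip = replace_frame(stack, (int)(sizeof(stack) / sizeof(stack[0])), setup_ip);

        printf("start report at frame %d: %#lx\n", skip, stack[skip]);
        return 0;
}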
@@ -18,7 +18,7 @@
 #define ITERS_PER_TEST 2000
 
 /* Test requirements. */
-static bool test_requires(void)
+static bool __init test_requires(void)
 {
        /* random should be initialized for the below tests */
        return prandom_u32() + prandom_u32() != 0;
@@ -28,14 +28,18 @@ static bool test_requires(void)
  * Test watchpoint encode and decode: check that encoding some access's info,
  * and then subsequent decode preserves the access's info.
  */
-static bool test_encode_decode(void)
+static bool __init test_encode_decode(void)
 {
        int i;
 
        for (i = 0; i < ITERS_PER_TEST; ++i) {
                size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1;
                bool is_write = !!prandom_u32_max(2);
+               unsigned long verif_masked_addr;
+               long encoded_watchpoint;
+               bool verif_is_write;
                unsigned long addr;
+               size_t verif_size;
 
                prandom_bytes(&addr, sizeof(addr));
                if (addr < PAGE_SIZE)
@@ -44,53 +48,37 @@ static bool test_encode_decode(void)
                if (WARN_ON(!check_encodable(addr, size)))
                        return false;
 
-               /* Encode and decode */
-               {
-                       const long encoded_watchpoint =
-                               encode_watchpoint(addr, size, is_write);
-                       unsigned long verif_masked_addr;
-                       size_t verif_size;
-                       bool verif_is_write;
-
-                       /* Check special watchpoints */
-                       if (WARN_ON(decode_watchpoint(
-                                       INVALID_WATCHPOINT, &verif_masked_addr,
-                                       &verif_size, &verif_is_write)))
-                               return false;
-                       if (WARN_ON(decode_watchpoint(
-                                       CONSUMED_WATCHPOINT, &verif_masked_addr,
-                                       &verif_size, &verif_is_write)))
-                               return false;
-
-                       /* Check decoding watchpoint returns same data */
-                       if (WARN_ON(!decode_watchpoint(
-                                       encoded_watchpoint, &verif_masked_addr,
-                                       &verif_size, &verif_is_write)))
-                               return false;
-                       if (WARN_ON(verif_masked_addr !=
-                                           (addr & WATCHPOINT_ADDR_MASK)))
-                               goto fail;
-                       if (WARN_ON(verif_size != size))
-                               goto fail;
-                       if (WARN_ON(is_write != verif_is_write))
-                               goto fail;
-
-                       continue;
-fail:
-                       pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
-                              __func__, is_write ? "write" : "read", size,
-                              addr, encoded_watchpoint,
-                              verif_is_write ? "write" : "read", verif_size,
-                              verif_masked_addr);
-                       return false;
-               }
+               encoded_watchpoint = encode_watchpoint(addr, size, is_write);
+
+               /* Check special watchpoints */
+               if (WARN_ON(decode_watchpoint(INVALID_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write)))
+                       return false;
+               if (WARN_ON(decode_watchpoint(CONSUMED_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write)))
+                       return false;
+
+               /* Check decoding watchpoint returns same data */
+               if (WARN_ON(!decode_watchpoint(encoded_watchpoint, &verif_masked_addr, &verif_size, &verif_is_write)))
+                       return false;
+               if (WARN_ON(verif_masked_addr != (addr & WATCHPOINT_ADDR_MASK)))
+                       goto fail;
+               if (WARN_ON(verif_size != size))
+                       goto fail;
+               if (WARN_ON(is_write != verif_is_write))
+                       goto fail;
+
+               continue;
+fail:
+               pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
+                      __func__, is_write ? "write" : "read", size, addr, encoded_watchpoint,
+                      verif_is_write ? "write" : "read", verif_size, verif_masked_addr);
+               return false;
        }
 
        return true;
 }
 
 /* Test access matching function. */
-static bool test_matching_access(void)
+static bool __init test_matching_access(void)
 {
        if (WARN_ON(!matching_access(10, 1, 10, 1)))
                return false;
......