Commit 7066248c authored by Will Deacon's avatar Will Deacon

Merge branch 'for-next/mte' into for-next/core

* for-next/mte:
  kasan: Extend KASAN mode kernel parameter
  arm64: mte: Add asymmetric mode support
  arm64: mte: CPU feature detection for Asymm MTE
  arm64: mte: Bitfield definitions for Asymm MTE
  kasan: Remove duplicate of kasan_flag_async
  arm64: kasan: mte: move GCR_EL1 switch to task switch when KASAN disabled
parents dc6bab18 2d27e585
...@@ -194,14 +194,17 @@ additional boot parameters that allow disabling KASAN or controlling features: ...@@ -194,14 +194,17 @@ additional boot parameters that allow disabling KASAN or controlling features:
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``). - ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
- ``kasan.mode=sync`` or ``=async`` controls whether KASAN is configured in - ``kasan.mode=sync``, ``=async`` or ``=asymm`` controls whether KASAN
synchronous or asynchronous mode of execution (default: ``sync``). is configured in synchronous, asynchronous or asymmetric mode of
execution (default: ``sync``).
Synchronous mode: a bad access is detected immediately when a tag Synchronous mode: a bad access is detected immediately when a tag
check fault occurs. check fault occurs.
Asynchronous mode: a bad access detection is delayed. When a tag check Asynchronous mode: a bad access detection is delayed. When a tag check
fault occurs, the information is stored in hardware (in the TFSR_EL1 fault occurs, the information is stored in hardware (in the TFSR_EL1
register for arm64). The kernel periodically checks the hardware and register for arm64). The kernel periodically checks the hardware and
only reports tag faults during these checks. only reports tag faults during these checks.
Asymmetric mode: a bad access is detected synchronously on reads and
asynchronously on writes.
- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack - ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
traces collection (default: ``on``). traces collection (default: ``on``).
......
...@@ -243,6 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) ...@@ -243,6 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#ifdef CONFIG_KASAN_HW_TAGS #ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync() #define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async() #define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
#define arch_force_async_tag_fault() mte_check_tfsr_exit() #define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag() #define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr) #define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
......
...@@ -130,6 +130,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, ...@@ -130,6 +130,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
void mte_enable_kernel_sync(void); void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void); void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);
#else /* CONFIG_ARM64_MTE */ #else /* CONFIG_ARM64_MTE */
...@@ -161,6 +162,10 @@ static inline void mte_enable_kernel_async(void) ...@@ -161,6 +162,10 @@ static inline void mte_enable_kernel_async(void)
{ {
} }
static inline void mte_enable_kernel_asymm(void)
{
}
#endif /* CONFIG_ARM64_MTE */ #endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -88,11 +88,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child, ...@@ -88,11 +88,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
#ifdef CONFIG_KASAN_HW_TAGS #ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */ /* Whether the MTE asynchronous or asymmetric mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_mode); DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
static inline bool system_uses_mte_async_mode(void) static inline bool system_uses_mte_async_or_asymm_mode(void)
{ {
return static_branch_unlikely(&mte_async_mode); return static_branch_unlikely(&mte_async_or_asymm_mode);
} }
void mte_check_tfsr_el1(void); void mte_check_tfsr_el1(void);
...@@ -121,7 +121,7 @@ static inline void mte_check_tfsr_exit(void) ...@@ -121,7 +121,7 @@ static inline void mte_check_tfsr_exit(void)
mte_check_tfsr_el1(); mte_check_tfsr_el1();
} }
#else #else
static inline bool system_uses_mte_async_mode(void) static inline bool system_uses_mte_async_or_asymm_mode(void)
{ {
return false; return false;
} }
......
...@@ -626,6 +626,7 @@ ...@@ -626,6 +626,7 @@
#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT) #define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT) #define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) #define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_ASYMM (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) #define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_ENIA_SHIFT 31 #define SCTLR_ELx_ENIA_SHIFT 31
...@@ -671,6 +672,7 @@ ...@@ -671,6 +672,7 @@
#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT) #define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT) #define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT) #define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_ASYMM (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) #define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
#define SCTLR_EL1_BT1 (BIT(36)) #define SCTLR_EL1_BT1 (BIT(36))
...@@ -812,6 +814,7 @@ ...@@ -812,6 +814,7 @@
#define ID_AA64PFR1_MTE_NI 0x0 #define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1 #define ID_AA64PFR1_MTE_EL0 0x1
#define ID_AA64PFR1_MTE 0x2 #define ID_AA64PFR1_MTE 0x2
#define ID_AA64PFR1_MTE_ASYMM 0x3
/* id_aa64zfr0 */ /* id_aa64zfr0 */
#define ID_AA64ZFR0_F64MM_SHIFT 56 #define ID_AA64ZFR0_F64MM_SHIFT 56
......
...@@ -191,13 +191,13 @@ static inline void __uaccess_enable_tco(void) ...@@ -191,13 +191,13 @@ static inline void __uaccess_enable_tco(void)
*/ */
static inline void __uaccess_disable_tco_async(void) static inline void __uaccess_disable_tco_async(void)
{ {
if (system_uses_mte_async_mode()) if (system_uses_mte_async_or_asymm_mode())
__uaccess_disable_tco(); __uaccess_disable_tco();
} }
static inline void __uaccess_enable_tco_async(void) static inline void __uaccess_enable_tco_async(void)
{ {
if (system_uses_mte_async_mode()) if (system_uses_mte_async_or_asymm_mode())
__uaccess_enable_tco(); __uaccess_enable_tco();
} }
......
...@@ -2331,6 +2331,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -2331,6 +2331,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED, .sign = FTR_UNSIGNED,
.cpu_enable = cpu_enable_mte, .cpu_enable = cpu_enable_mte,
}, },
{
.desc = "Asymmetric MTE Tag Check Fault",
.capability = ARM64_MTE_ASYMM,
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_MTE_SHIFT,
.min_field_value = ID_AA64PFR1_MTE_ASYMM,
.sign = FTR_UNSIGNED,
},
#endif /* CONFIG_ARM64_MTE */ #endif /* CONFIG_ARM64_MTE */
{ {
.desc = "RCpc load-acquire (LDAPR)", .desc = "RCpc load-acquire (LDAPR)",
......
...@@ -168,9 +168,9 @@ alternative_else_nop_endif ...@@ -168,9 +168,9 @@ alternative_else_nop_endif
.macro mte_set_kernel_gcr, tmp, tmp2 .macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS #ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE alternative_cb kasan_hw_tags_enable
b 1f b 1f
alternative_else_nop_endif alternative_cb_end
mov \tmp, KERNEL_GCR_EL1 mov \tmp, KERNEL_GCR_EL1
msr_s SYS_GCR_EL1, \tmp msr_s SYS_GCR_EL1, \tmp
1: 1:
...@@ -178,10 +178,10 @@ alternative_else_nop_endif ...@@ -178,10 +178,10 @@ alternative_else_nop_endif
.endm .endm
.macro mte_set_user_gcr, tsk, tmp, tmp2 .macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_ARM64_MTE #ifdef CONFIG_KASAN_HW_TAGS
alternative_if_not ARM64_MTE alternative_cb kasan_hw_tags_enable
b 1f b 1f
alternative_else_nop_endif alternative_cb_end
ldr \tmp, [\tsk, #THREAD_MTE_CTRL] ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
mte_set_gcr \tmp, \tmp2 mte_set_gcr \tmp, \tmp2
......
...@@ -26,9 +26,12 @@ ...@@ -26,9 +26,12 @@
static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred); static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
#ifdef CONFIG_KASAN_HW_TAGS #ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */ /*
DEFINE_STATIC_KEY_FALSE(mte_async_mode); * The asynchronous and asymmetric MTE modes have the same behavior for
EXPORT_SYMBOL_GPL(mte_async_mode); * store operations. This flag is set when either of these modes is enabled.
*/
DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif #endif
static void mte_sync_page_tags(struct page *page, pte_t old_pte, static void mte_sync_page_tags(struct page *page, pte_t old_pte,
...@@ -116,7 +119,7 @@ void mte_enable_kernel_sync(void) ...@@ -116,7 +119,7 @@ void mte_enable_kernel_sync(void)
* Make sure we enter this function when no PE has set * Make sure we enter this function when no PE has set
* async mode previously. * async mode previously.
*/ */
WARN_ONCE(system_uses_mte_async_mode(), WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
"MTE async mode enabled system wide!"); "MTE async mode enabled system wide!");
__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC); __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
...@@ -134,8 +137,34 @@ void mte_enable_kernel_async(void) ...@@ -134,8 +137,34 @@ void mte_enable_kernel_async(void)
* mode in between sync and async, this strategy needs * mode in between sync and async, this strategy needs
* to be reviewed. * to be reviewed.
*/ */
if (!system_uses_mte_async_mode()) if (!system_uses_mte_async_or_asymm_mode())
static_branch_enable(&mte_async_mode); static_branch_enable(&mte_async_or_asymm_mode);
}
void mte_enable_kernel_asymm(void)
{
if (cpus_have_cap(ARM64_MTE_ASYMM)) {
__mte_enable_kernel("asymmetric", SCTLR_ELx_TCF_ASYMM);
/*
* MTE asymm mode behaves as async mode for store
* operations. The mode is set system wide by the
* first PE that executes this function.
*
* Note: If in future KASAN acquires a runtime switching
* mode in between sync and async, this strategy needs
* to be reviewed.
*/
if (!system_uses_mte_async_or_asymm_mode())
static_branch_enable(&mte_async_or_asymm_mode);
} else {
/*
* If the CPU does not support MTE asymmetric mode the
* kernel falls back on synchronous mode which is the
* default for kasan=on.
*/
mte_enable_kernel_sync();
}
} }
#endif #endif
...@@ -179,6 +208,30 @@ static void mte_update_sctlr_user(struct task_struct *task) ...@@ -179,6 +208,30 @@ static void mte_update_sctlr_user(struct task_struct *task)
task->thread.sctlr_user = sctlr; task->thread.sctlr_user = sctlr;
} }
static void mte_update_gcr_excl(struct task_struct *task)
{
/*
* SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
* mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
*/
if (kasan_hw_tags_enabled())
return;
write_sysreg_s(
((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
SYS_GCR_EL1);
}
void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst)
{
BUG_ON(nr_inst != 1); /* Branch -> NOP */
if (kasan_hw_tags_enabled())
*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
void mte_thread_init_user(void) void mte_thread_init_user(void)
{ {
if (!system_supports_mte()) if (!system_supports_mte())
...@@ -198,6 +251,7 @@ void mte_thread_switch(struct task_struct *next) ...@@ -198,6 +251,7 @@ void mte_thread_switch(struct task_struct *next)
return; return;
mte_update_sctlr_user(next); mte_update_sctlr_user(next);
mte_update_gcr_excl(next);
/* /*
* Check if an async tag exception occurred at EL1. * Check if an async tag exception occurred at EL1.
...@@ -243,6 +297,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg) ...@@ -243,6 +297,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
if (task == current) { if (task == current) {
preempt_disable(); preempt_disable();
mte_update_sctlr_user(task); mte_update_sctlr_user(task);
mte_update_gcr_excl(task);
update_sctlr_el1(task->thread.sctlr_user); update_sctlr_el1(task->thread.sctlr_user);
preempt_enable(); preempt_enable();
} }
......
...@@ -40,6 +40,7 @@ HW_DBM ...@@ -40,6 +40,7 @@ HW_DBM
KVM_PROTECTED_MODE KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE MISMATCHED_CACHE_TYPE
MTE MTE
MTE_ASYMM
SPECTRE_V2 SPECTRE_V2
SPECTRE_V3A SPECTRE_V3A
SPECTRE_V4 SPECTRE_V4
......
...@@ -89,7 +89,7 @@ static __always_inline bool kasan_enabled(void) ...@@ -89,7 +89,7 @@ static __always_inline bool kasan_enabled(void)
return static_branch_likely(&kasan_flag_enabled); return static_branch_likely(&kasan_flag_enabled);
} }
static inline bool kasan_has_integrated_init(void) static inline bool kasan_hw_tags_enabled(void)
{ {
return kasan_enabled(); return kasan_enabled();
} }
...@@ -104,7 +104,7 @@ static inline bool kasan_enabled(void) ...@@ -104,7 +104,7 @@ static inline bool kasan_enabled(void)
return IS_ENABLED(CONFIG_KASAN); return IS_ENABLED(CONFIG_KASAN);
} }
static inline bool kasan_has_integrated_init(void) static inline bool kasan_hw_tags_enabled(void)
{ {
return false; return false;
} }
...@@ -125,6 +125,11 @@ static __always_inline void kasan_free_pages(struct page *page, ...@@ -125,6 +125,11 @@ static __always_inline void kasan_free_pages(struct page *page,
#endif /* CONFIG_KASAN_HW_TAGS */ #endif /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_has_integrated_init(void)
{
return kasan_hw_tags_enabled();
}
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
struct kasan_cache { struct kasan_cache {
......
...@@ -88,7 +88,7 @@ static void kasan_test_exit(struct kunit *test) ...@@ -88,7 +88,7 @@ static void kasan_test_exit(struct kunit *test)
*/ */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
!kasan_async_mode_enabled()) \ kasan_sync_fault_possible()) \
migrate_disable(); \ migrate_disable(); \
KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \ KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found)); \
barrier(); \ barrier(); \
......
...@@ -29,6 +29,7 @@ enum kasan_arg_mode { ...@@ -29,6 +29,7 @@ enum kasan_arg_mode {
KASAN_ARG_MODE_DEFAULT, KASAN_ARG_MODE_DEFAULT,
KASAN_ARG_MODE_SYNC, KASAN_ARG_MODE_SYNC,
KASAN_ARG_MODE_ASYNC, KASAN_ARG_MODE_ASYNC,
KASAN_ARG_MODE_ASYMM,
}; };
enum kasan_arg_stacktrace { enum kasan_arg_stacktrace {
...@@ -45,9 +46,9 @@ static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init; ...@@ -45,9 +46,9 @@ static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled); DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled); EXPORT_SYMBOL(kasan_flag_enabled);
/* Whether the asynchronous mode is enabled. */ /* Whether the selected mode is synchronous/asynchronous/asymmetric. */
bool kasan_flag_async __ro_after_init; enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_flag_async); EXPORT_SYMBOL_GPL(kasan_mode);
/* Whether to collect alloc/free stack traces. */ /* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace); DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
...@@ -69,7 +70,7 @@ static int __init early_kasan_flag(char *arg) ...@@ -69,7 +70,7 @@ static int __init early_kasan_flag(char *arg)
} }
early_param("kasan", early_kasan_flag); early_param("kasan", early_kasan_flag);
/* kasan.mode=sync/async */ /* kasan.mode=sync/async/asymm */
static int __init early_kasan_mode(char *arg) static int __init early_kasan_mode(char *arg)
{ {
if (!arg) if (!arg)
...@@ -79,6 +80,8 @@ static int __init early_kasan_mode(char *arg) ...@@ -79,6 +80,8 @@ static int __init early_kasan_mode(char *arg)
kasan_arg_mode = KASAN_ARG_MODE_SYNC; kasan_arg_mode = KASAN_ARG_MODE_SYNC;
else if (!strcmp(arg, "async")) else if (!strcmp(arg, "async"))
kasan_arg_mode = KASAN_ARG_MODE_ASYNC; kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
else if (!strcmp(arg, "asymm"))
kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
else else
return -EINVAL; return -EINVAL;
...@@ -116,11 +119,13 @@ void kasan_init_hw_tags_cpu(void) ...@@ -116,11 +119,13 @@ void kasan_init_hw_tags_cpu(void)
return; return;
/* /*
* Enable async mode only when explicitly requested through * Enable async or asymm modes only when explicitly requested
* the command line. * through the command line.
*/ */
if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC) if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
hw_enable_tagging_async(); hw_enable_tagging_async();
else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
hw_enable_tagging_asymm();
else else
hw_enable_tagging_sync(); hw_enable_tagging_sync();
} }
...@@ -143,15 +148,19 @@ void __init kasan_init_hw_tags(void) ...@@ -143,15 +148,19 @@ void __init kasan_init_hw_tags(void)
case KASAN_ARG_MODE_DEFAULT: case KASAN_ARG_MODE_DEFAULT:
/* /*
* Default to sync mode. * Default to sync mode.
* Do nothing, kasan_flag_async keeps its default value.
*/ */
break; fallthrough;
case KASAN_ARG_MODE_SYNC: case KASAN_ARG_MODE_SYNC:
/* Do nothing, kasan_flag_async keeps its default value. */ /* Sync mode enabled. */
kasan_mode = KASAN_MODE_SYNC;
break; break;
case KASAN_ARG_MODE_ASYNC: case KASAN_ARG_MODE_ASYNC:
/* Async mode enabled. */ /* Async mode enabled. */
kasan_flag_async = true; kasan_mode = KASAN_MODE_ASYNC;
break;
case KASAN_ARG_MODE_ASYMM:
/* Asymm mode enabled. */
kasan_mode = KASAN_MODE_ASYMM;
break; break;
} }
......
...@@ -13,16 +13,28 @@ ...@@ -13,16 +13,28 @@
#include "../slab.h" #include "../slab.h"
DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace); DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
extern bool kasan_flag_async __ro_after_init;
enum kasan_mode {
KASAN_MODE_SYNC,
KASAN_MODE_ASYNC,
KASAN_MODE_ASYMM,
};
extern enum kasan_mode kasan_mode __ro_after_init;
static inline bool kasan_stack_collection_enabled(void) static inline bool kasan_stack_collection_enabled(void)
{ {
return static_branch_unlikely(&kasan_flag_stacktrace); return static_branch_unlikely(&kasan_flag_stacktrace);
} }
static inline bool kasan_async_mode_enabled(void) static inline bool kasan_async_fault_possible(void)
{
return kasan_mode == KASAN_MODE_ASYNC || kasan_mode == KASAN_MODE_ASYMM;
}
static inline bool kasan_sync_fault_possible(void)
{ {
return kasan_flag_async; return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
} }
#else #else
...@@ -31,14 +43,17 @@ static inline bool kasan_stack_collection_enabled(void) ...@@ -31,14 +43,17 @@ static inline bool kasan_stack_collection_enabled(void)
return true; return true;
} }
static inline bool kasan_async_mode_enabled(void) static inline bool kasan_async_fault_possible(void)
{ {
return false; return false;
} }
#endif static inline bool kasan_sync_fault_possible(void)
{
return true;
}
extern bool kasan_flag_async __ro_after_init; #endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT) #define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
...@@ -289,6 +304,9 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) ...@@ -289,6 +304,9 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#ifndef arch_enable_tagging_async #ifndef arch_enable_tagging_async
#define arch_enable_tagging_async() #define arch_enable_tagging_async()
#endif #endif
#ifndef arch_enable_tagging_asymm
#define arch_enable_tagging_asymm()
#endif
#ifndef arch_force_async_tag_fault #ifndef arch_force_async_tag_fault
#define arch_force_async_tag_fault() #define arch_force_async_tag_fault()
#endif #endif
...@@ -304,6 +322,7 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) ...@@ -304,6 +322,7 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define hw_enable_tagging_sync() arch_enable_tagging_sync() #define hw_enable_tagging_sync() arch_enable_tagging_sync()
#define hw_enable_tagging_async() arch_enable_tagging_async() #define hw_enable_tagging_async() arch_enable_tagging_async()
#define hw_enable_tagging_asymm() arch_enable_tagging_asymm()
#define hw_force_async_tag_fault() arch_force_async_tag_fault() #define hw_force_async_tag_fault() arch_force_async_tag_fault()
#define hw_get_random_tag() arch_get_random_tag() #define hw_get_random_tag() arch_get_random_tag()
#define hw_get_mem_tag(addr) arch_get_mem_tag(addr) #define hw_get_mem_tag(addr) arch_get_mem_tag(addr)
...@@ -314,6 +333,7 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) ...@@ -314,6 +333,7 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define hw_enable_tagging_sync() #define hw_enable_tagging_sync()
#define hw_enable_tagging_async() #define hw_enable_tagging_async()
#define hw_enable_tagging_asymm()
#endif /* CONFIG_KASAN_HW_TAGS */ #endif /* CONFIG_KASAN_HW_TAGS */
......
...@@ -112,7 +112,7 @@ static void start_report(unsigned long *flags) ...@@ -112,7 +112,7 @@ static void start_report(unsigned long *flags)
static void end_report(unsigned long *flags, unsigned long addr) static void end_report(unsigned long *flags, unsigned long addr)
{ {
if (!kasan_async_mode_enabled()) if (!kasan_async_fault_possible())
trace_error_report_end(ERROR_DETECTOR_KASAN, addr); trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
pr_err("==================================================================\n"); pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment