Commit dd061616 authored by Peter Collingbourne, committed by Catalin Marinas

arm64: mte: introduce a per-CPU tag checking mode preference

Add a per-CPU sysfs node, mte_tcf_preferred, that allows the preferred
tag checking mode to be configured. The current possible values are
async and sync.

Link: https://linux-review.googlesource.com/id/I7493dcd533a2785a1437b16c3f6b50919f840854
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210727205300.2554659-5-pcc@google.com
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent d2e0d8f9
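
As context for the diff below, a minimal userspace sketch (not part of the commit): a thread that requests both tag check fault modes via prctl() will, with this change, have its effective mode resolved from the CPU's mte_tcf_preferred setting. The PR_MTE_* constants come from <linux/prctl.h>; error handling is reduced to a perror() for brevity.

/* Hypothetical example, not from the commit: opt the calling thread into
 * both MTE tag check fault modes and let the kernel pick the effective one
 * according to the per-CPU mte_tcf_preferred value.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
			     PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC;

	/* Request both sync and async; the per-CPU preference breaks the tie. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");
	return 0;
}
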
@@ -4,6 +4,7 @@
  */
 #include <linux/bitops.h>
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/prctl.h>
@@ -26,6 +27,8 @@ u64 gcr_kernel_excl __ro_after_init;
 static bool report_fault_once = true;
+static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
+
 #ifdef CONFIG_KASAN_HW_TAGS
 /* Whether the MTE asynchronous mode is enabled. */
 DEFINE_STATIC_KEY_FALSE(mte_async_mode);
@@ -195,11 +198,18 @@ void mte_check_tfsr_el1(void)
 static void mte_update_sctlr_user(struct task_struct *task)
 {
+	/*
+	 * This must be called with preemption disabled and can only be called
+	 * on the current or next task since the CPU must match where the thread
+	 * is going to run. The caller is responsible for calling
+	 * update_sctlr_el1() later in the same preemption disabled block.
+	 */
 	unsigned long sctlr = task->thread.sctlr_user;
-	unsigned long pref = MTE_CTRL_TCF_ASYNC;
 	unsigned long mte_ctrl = task->thread.mte_ctrl;
-	unsigned long resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
+	unsigned long pref, resolved_mte_tcf;
 
+	pref = __this_cpu_read(mte_tcf_preferred);
+	resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
 	sctlr &= ~SCTLR_EL1_TCF0_MASK;
 	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
 		sctlr |= SCTLR_EL1_TCF0_ASYNC;
@@ -438,3 +448,54 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
 	return ret;
 }
+
+static ssize_t mte_tcf_preferred_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	switch (per_cpu(mte_tcf_preferred, dev->id)) {
+	case MTE_CTRL_TCF_ASYNC:
+		return sysfs_emit(buf, "async\n");
+	case MTE_CTRL_TCF_SYNC:
+		return sysfs_emit(buf, "sync\n");
+	default:
+		return sysfs_emit(buf, "???\n");
+	}
+}
+
+static ssize_t mte_tcf_preferred_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	u64 tcf;
+
+	if (sysfs_streq(buf, "async"))
+		tcf = MTE_CTRL_TCF_ASYNC;
+	else if (sysfs_streq(buf, "sync"))
+		tcf = MTE_CTRL_TCF_SYNC;
+	else
+		return -EINVAL;
+
+	device_lock(dev);
+	per_cpu(mte_tcf_preferred, dev->id) = tcf;
+	device_unlock(dev);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(mte_tcf_preferred);
+
+static int register_mte_tcf_preferred_sysctl(void)
+{
+	unsigned int cpu;
+
+	if (!system_supports_mte())
+		return 0;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
+		device_create_file(get_cpu_device(cpu),
+				   &dev_attr_mte_tcf_preferred);
+	}
+
+	return 0;
+}
+subsys_initcall(register_mte_tcf_preferred_sysctl);
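
For completeness, a hypothetical admin-side sketch (not from the commit): selecting "sync" as CPU 0's preferred tag check fault mode. The path assumes the attribute appears on the standard CPU devices under /sys/devices/system/cpu/cpuN/, which is where device_create_file() on get_cpu_device(cpu) would place it.

/* Illustrative only: write a new preference to CPU 0's sysfs node. */
#include <stdio.h>

int main(void)
{
	const char *node = "/sys/devices/system/cpu/cpu0/mte_tcf_preferred";
	FILE *f = fopen(node, "w");

	if (!f) {
		perror(node);
		return 1;
	}
	/* Valid values per the patch: "async" or "sync". */
	fputs("sync", f);
	fclose(f);
	return 0;
}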