Commit 2cb34276 authored by Andrey Konovalov's avatar Andrey Konovalov Committed by Linus Torvalds

arm64: kasan: simplify and inline MTE functions

This change provides a simpler implementation of mte_get_mem_tag(),
mte_get_random_tag(), and mte_set_mem_tag_range().

Simplifications include removing system_supports_mte() checks as these
functions are only called from the KASAN runtime that had already checked
system_supports_mte().  Besides that, size and address alignment checks
are removed from mte_set_mem_tag_range(), as KASAN now does those.

This change also moves these functions into the asm/mte-kasan.h header and
implements mte_set_mem_tag_range() via inline assembly to avoid
unnecessary function calls.

[vincenzo.frascino@arm.com: fix warning in mte_get_random_tag()]
  Link: https://lkml.kernel.org/r/20210211152208.23811-1-vincenzo.frascino@arm.com

Link: https://lkml.kernel.org/r/a26121b294fdf76e369cb7a74351d1c03a908930.1612546384.git.andreyknvl@google.com
Co-developed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: default avatarVincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: default avatarAndrey Konovalov <andreyknvl@google.com>
Reviewed-by: default avatarCatalin Marinas <catalin.marinas@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent cde8a7eb
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
#define __ASM_CACHE_H #define __ASM_CACHE_H
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/mte-kasan.h>
#define CTR_L1IP_SHIFT 14 #define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3 #define CTR_L1IP_MASK 3
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/mte-kasan.h>
#include <asm/pgtable-types.h> #include <asm/pgtable-types.h>
#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag) #define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
......
...@@ -11,4 +11,6 @@ ...@@ -11,4 +11,6 @@
#define MTE_TAG_SIZE 4 #define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT) #define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
#endif /* __ASM_MTE_DEF_H */ #endif /* __ASM_MTE_DEF_H */
...@@ -11,11 +11,14 @@ ...@@ -11,11 +11,14 @@
#include <linux/types.h> #include <linux/types.h>
#ifdef CONFIG_ARM64_MTE
/* /*
* The functions below are meant to be used only for the * These functions are meant to be only used from KASAN runtime through
* KASAN_HW_TAGS interface defined in asm/memory.h. * the arch_*() interface defined in asm/memory.h.
* These functions don't include system_supports_mte() checks,
* as KASAN only calls them when MTE is supported and enabled.
*/ */
#ifdef CONFIG_ARM64_MTE
static inline u8 mte_get_ptr_tag(void *ptr) static inline u8 mte_get_ptr_tag(void *ptr)
{ {
...@@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *ptr) ...@@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *ptr)
return tag; return tag;
} }
u8 mte_get_mem_tag(void *addr); /* Get allocation tag for the address. */
u8 mte_get_random_tag(void); static inline u8 mte_get_mem_tag(void *addr)
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag); {
asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
/*
 * Generate a random tag via the IRG instruction.
 * Returns only the tag bits (extracted by mte_get_ptr_tag()); the rest of
 * the generated pointer value is discarded.
 */
static inline u8 mte_get_random_tag(void)
{
void *addr;
/*
 * Output-only ("=r") constraint: IRG fully writes the register, so the
 * uninitialized 'addr' is never read (this also avoids the compiler
 * warning mentioned in the commit message).
 */
asm(__MTE_PREAMBLE "irg %0, %0"
: "=r" (addr));
return mte_get_ptr_tag(addr);
}
/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be non-zero and MTE_GRANULE_SIZE aligned.
 *
 * The caller-supplied 'tag' is merged into the address via __tag_set(),
 * and STG then tags each granule from the address's tag bits. No alignment
 * or MTE-support checks are done here; the KASAN caller guarantees both.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
u64 curr, end;
if (!size)
return;
curr = (u64)__tag_set(addr, tag);
end = curr + size;
/*
 * NOTE: the '!=' exit test relies on 'size' being a multiple of
 * MTE_GRANULE_SIZE (see precondition above); an unaligned size would
 * make 'curr' step past 'end' and loop forever.
 */
do {
/*
 * 'asm volatile' is required to prevent the compiler from moving
 * the statement outside of the loop.
 */
asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
:
: "r" (curr)
: "memory");
curr += MTE_GRANULE_SIZE;
} while (curr != end);
}
void mte_enable_kernel(void); void mte_enable_kernel(void);
void mte_init_tags(u64 max_tag); void mte_init_tags(u64 max_tag);
...@@ -46,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr) ...@@ -46,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr)
{ {
return 0xFF; return 0xFF;
} }
static inline u8 mte_get_random_tag(void) static inline u8 mte_get_random_tag(void)
{ {
return 0xFF; return 0xFF;
} }
static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{ {
return addr;
} }
static inline void mte_enable_kernel(void) static inline void mte_enable_kernel(void)
......
...@@ -8,8 +8,6 @@ ...@@ -8,8 +8,6 @@
#include <asm/compiler.h> #include <asm/compiler.h>
#include <asm/mte-def.h> #include <asm/mte-def.h>
#define __MTE_PREAMBLE ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/bitfield.h> #include <linux/bitfield.h>
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/mte.h> #include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
...@@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2) ...@@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
return ret; return ret;
} }
/*
 * Read the allocation tag stored for 'addr' (LDG loads it into the tag
 * bits of the address register). Returns 0xFF when MTE is unsupported.
 * This out-of-line version is removed by this commit in favour of the
 * inline one in asm/mte-kasan.h.
 */
u8 mte_get_mem_tag(void *addr)
{
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "ldg %0, [%0]"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
/*
 * Generate a random tag via IRG; returns 0xFF when MTE is unsupported.
 * NOTE(review): the "+r" constraint makes the uninitialized 'addr' a
 * read-write operand — the source of the compiler warning this commit
 * fixes by switching the inline replacement to an output-only "=r".
 */
u8 mte_get_random_tag(void)
{
void *addr;
if (!system_supports_mte())
return 0xFF;
asm(__MTE_PREAMBLE "irg %0, %0"
: "+r" (addr));
return mte_get_ptr_tag(addr);
}
/*
 * Tag the [addr, addr + size) range with 'tag' and return the tagged
 * pointer. Unlike the inline replacement, this version still performs
 * the system_supports_mte() and alignment sanity checks itself.
 */
void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
void *ptr = addr;
if ((!system_supports_mte()) || (size == 0))
return addr;
/* Make sure that size is MTE granule aligned. */
WARN_ON(size & (MTE_GRANULE_SIZE - 1));
/* Make sure that the address is MTE granule aligned. */
WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
/* 0xF0 forces the top nibble so the result is a kernel (TTBR1) tag. */
tag = 0xF0 | tag;
ptr = (void *)__tag_set(ptr, tag);
mte_assign_mem_tag_range(ptr, size);
return ptr;
}
void mte_init_tags(u64 max_tag) void mte_init_tags(u64 max_tag)
{ {
static bool gcr_kernel_excl_initialized; static bool gcr_kernel_excl_initialized;
......
...@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags) ...@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)
ret ret
SYM_FUNC_END(mte_restore_page_tags) SYM_FUNC_END(mte_restore_page_tags)
/*
 * Assign allocation tags for a region of memory based on the pointer tag
 * x0 - source pointer
 * x1 - size
 *
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be non-zero and MTE_GRANULE_SIZE aligned.
 * (Removed by this commit; replaced by the inline C loop in
 * asm/mte-kasan.h to avoid a function call from the KASAN runtime.)
 */
SYM_FUNC_START(mte_assign_mem_tag_range)
1: stg x0, [x0] // tag one granule from x0's tag bits
add x0, x0, #MTE_GRANULE_SIZE // advance to the next granule
subs x1, x1, #MTE_GRANULE_SIZE // decrement remaining size, set flags
b.gt 1b // loop while bytes remain
ret
SYM_FUNC_END(mte_assign_mem_tag_range)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment