Commit 2563776b authored by Vincenzo Frascino, committed by Catalin Marinas

arm64: mte: Tags-aware copy_{user_,}highpage() implementations

When the Memory Tagging Extension is enabled, the tags need to be
preserved across page copy (e.g. for copy-on-write, page migration).

Introduce MTE-aware copy_{user_,}highpage() functions to copy tags to
the destination if the source page has the PG_mte_tagged flag set.
copy_user_page() does not need to handle tag copying since, with this
patch, it is only called by the DAX code where there is no source page
structure (and no source tags).
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Co-developed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
parent 34bfeea4
...@@ -19,6 +19,7 @@ void mte_clear_page_tags(void *addr); ...@@ -19,6 +19,7 @@ void mte_clear_page_tags(void *addr);
#define PG_mte_tagged PG_arch_2 #define PG_mte_tagged PG_arch_2
void mte_sync_tags(pte_t *ptep, pte_t pte); void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void flush_mte_state(void); void flush_mte_state(void);
#else #else
...@@ -29,6 +30,9 @@ void flush_mte_state(void); ...@@ -29,6 +30,9 @@ void flush_mte_state(void);
/* MTE disabled: nothing to synchronise when a tagged PTE is set. */
static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
{
}
/* MTE disabled: no allocation tags exist, so page-tag copy is a no-op. */
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
/* MTE disabled: no per-task MTE state to reset. */
static inline void flush_mte_state(void)
{
}
......
...@@ -15,18 +15,26 @@ ...@@ -15,18 +15,26 @@
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */ #include <linux/personality.h> /* for READ_IMPLIES_EXEC */
#include <asm/pgtable-types.h> #include <asm/pgtable-types.h>
struct page;
struct vm_area_struct;
extern void __cpu_clear_user_page(void *p, unsigned long user); extern void __cpu_clear_user_page(void *p, unsigned long user);
extern void __cpu_copy_user_page(void *to, const void *from,
unsigned long user);
extern void copy_page(void *to, const void *from); extern void copy_page(void *to, const void *from);
extern void clear_page(void *to); extern void clear_page(void *to);
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
typedef struct page *pgtable_t; typedef struct page *pgtable_t;
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/page.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
.arch armv8.5-a+memtag .arch armv8.5-a+memtag
...@@ -32,3 +33,21 @@ SYM_FUNC_START(mte_clear_page_tags) ...@@ -32,3 +33,21 @@ SYM_FUNC_START(mte_clear_page_tags)
b.ne 1b b.ne 1b
ret ret
SYM_FUNC_END(mte_clear_page_tags) SYM_FUNC_END(mte_clear_page_tags)
/*
 * Copy the tags from the source page to the destination one
 * x0 - address of the destination page
 * x1 - address of the source page
 *
 * The loop ends when the destination cursor reaches a PAGE_SIZE
 * boundary, so both addresses are assumed page-aligned — TODO confirm
 * with callers. Only allocation tags are moved (LDGM/STGM); the page
 * data itself is untouched. Clobbers x2-x6.
 */
SYM_FUNC_START(mte_copy_page_tags)
mov x2, x0
mov x3, x1
multitag_transfer_size x5, x6
1: ldgm x4, [x3]
stgm x4, [x2]
add x2, x2, x5
add x3, x3, x5
tst x2, #(PAGE_SIZE - 1)
b.ne 1b
ret
SYM_FUNC_END(mte_copy_page_tags)
...@@ -6,18 +6,35 @@ ...@@ -6,18 +6,35 @@
* Copyright (C) 2012 ARM Ltd. * Copyright (C) 2012 ARM Ltd.
*/ */
#include <linux/bitops.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) void copy_highpage(struct page *to, struct page *from)
{ {
struct page *page = virt_to_page(kto); struct page *kto = page_address(to);
struct page *kfrom = page_address(from);
copy_page(kto, kfrom); copy_page(kto, kfrom);
flush_dcache_page(page);
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
set_bit(PG_mte_tagged, &to->flags);
mte_copy_page_tags(kto, kfrom);
}
}
EXPORT_SYMBOL(copy_highpage);
/*
 * Copy a user page, including MTE tags, then flush the D-cache for the
 * destination so the user mapping observes the freshly written data.
 * vaddr/vma are unused here but are part of the generic arch hook.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_highpage(to, from);
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage);
void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment