Commit 428e106a authored by Kirill A. Shutemov, committed by Dave Hansen

mm: Introduce untagged_addr_remote()

untagged_addr() removes tags/metadata from an address and brings it to
canonical form. The helper is implemented on arm64 and sparc; both do
the untagging based on global rules.

However, Linear Address Masking (LAM) on x86 introduces per-process
settings for untagging. As a result, untagged_addr() is now only
suitable for untagging addresses of the current process.

The new helper untagged_addr_remote() has to be used when the address
targets a remote process. It requires the mmap lock for the target mm
to be taken.
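
As an illustration (not part of this patch), a caller operating on
another process's mm takes the mmap lock around the call, mirroring
what pagemap_read() does below; the helper name here is hypothetical:

	static unsigned long example_untag_remote(struct mm_struct *mm,
						  unsigned long addr)
	{
		unsigned long untagged;

		/* untagged_addr_remote() asserts that this lock is held */
		mmap_read_lock(mm);
		untagged = untagged_addr_remote(mm, addr);
		mmap_read_unlock(mm);

		return untagged;
	}
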
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Alexander Potapenko <glider@google.com>
Link: https://lore.kernel.org/all/20230312112612.31869-6-kirill.shutemov%40linux.intel.com
parent 82721d8b
@@ -8,8 +8,10 @@
 
 #include <linux/compiler.h>
 #include <linux/string.h>
+#include <linux/mm_types.h>
 
 #include <asm/asi.h>
 #include <asm/spitfire.h>
+#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm-generic/access_ok.h>
...
...@@ -580,7 +580,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, ...@@ -580,7 +580,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
goto done; goto done;
} }
vaddr = untagged_addr(vaddr); vaddr = untagged_addr_remote(mm, vaddr);
retry: retry:
vma = vma_lookup(mm, vaddr); vma = vma_lookup(mm, vaddr);
......
...@@ -1689,8 +1689,13 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, ...@@ -1689,8 +1689,13 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* watch out for wraparound */ /* watch out for wraparound */
start_vaddr = end_vaddr; start_vaddr = end_vaddr;
if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); ret = mmap_read_lock_killable(mm);
if (ret)
goto out_free;
start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
mmap_read_unlock(mm);
}
/* Ensure the address is inside the task */ /* Ensure the address is inside the task */
if (start_vaddr > mm->task_size) if (start_vaddr > mm->task_size)
......
...@@ -96,17 +96,6 @@ extern int mmap_rnd_compat_bits __read_mostly; ...@@ -96,17 +96,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#include <asm/page.h> #include <asm/page.h>
#include <asm/processor.h> #include <asm/processor.h>
/*
* Architectures that support memory tagging (assigning tags to memory regions,
* embedding these tags into addresses that point to these memory regions, and
* checking that the memory and the pointer tags match on memory accesses)
* redefine this macro to strip tags from pointers.
* It's defined as noop for architectures that don't support memory tagging.
*/
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif
#ifndef __pa_symbol #ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif #endif
......
...@@ -10,6 +10,28 @@ ...@@ -10,6 +10,28 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
/*
* Architectures that support memory tagging (assigning tags to memory regions,
* embedding these tags into addresses that point to these memory regions, and
* checking that the memory and the pointer tags match on memory accesses)
* redefine this macro to strip tags from pointers.
*
* Passing down mm_struct allows to define untagging rules on per-process
* basis.
*
* It's defined as noop for architectures that don't support memory tagging.
*/
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif
#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr) ({ \
mmap_assert_locked(mm); \
untagged_addr(addr); \
})
#endif
/* /*
* Architectures should provide two primitives (raw_copy_{to,from}_user()) * Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and * and get rid of their private instances of copy_{to,from}_user() and
......
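
The generic fallback above only asserts the lock and defers to
untagged_addr(). For context, an architecture with per-mm untagging
rules (such as x86 LAM, added later in this series) is expected to
override the macro so that it consults the target mm. A rough sketch,
with the context.untag_mask field assumed for illustration rather than
taken from this patch:

	/* Sketch of a per-mm override; 'untag_mask' is illustrative. */
	#define untagged_addr_remote(mm, addr)	({		\
		unsigned long __addr = (unsigned long)(addr);	\
		mmap_assert_locked(mm);				\
		__addr &= (mm)->context.untag_mask;		\
		(__typeof__(addr))__addr;			\
	})
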
...@@ -1085,7 +1085,7 @@ static long __get_user_pages(struct mm_struct *mm, ...@@ -1085,7 +1085,7 @@ static long __get_user_pages(struct mm_struct *mm,
if (!nr_pages) if (!nr_pages)
return 0; return 0;
start = untagged_addr(start); start = untagged_addr_remote(mm, start);
VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
...@@ -1259,7 +1259,7 @@ int fixup_user_fault(struct mm_struct *mm, ...@@ -1259,7 +1259,7 @@ int fixup_user_fault(struct mm_struct *mm,
struct vm_area_struct *vma; struct vm_area_struct *vma;
vm_fault_t ret; vm_fault_t ret;
address = untagged_addr(address); address = untagged_addr_remote(mm, address);
if (unlocked) if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
......
...@@ -1402,8 +1402,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh ...@@ -1402,8 +1402,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
size_t len; size_t len;
struct blk_plug plug; struct blk_plug plug;
start = untagged_addr(start);
if (!madvise_behavior_valid(behavior)) if (!madvise_behavior_valid(behavior))
return -EINVAL; return -EINVAL;
...@@ -1435,6 +1433,9 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh ...@@ -1435,6 +1433,9 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
mmap_read_lock(mm); mmap_read_lock(mm);
} }
start = untagged_addr_remote(mm, start);
end = start + len;
blk_start_plug(&plug); blk_start_plug(&plug);
error = madvise_walk_vmas(mm, start, end, behavior, error = madvise_walk_vmas(mm, start, end, behavior,
madvise_vma_behavior); madvise_vma_behavior);
......
...@@ -2097,15 +2097,18 @@ static int do_move_pages_to_node(struct mm_struct *mm, ...@@ -2097,15 +2097,18 @@ static int do_move_pages_to_node(struct mm_struct *mm,
* target node * target node
* 1 - when it has been queued * 1 - when it has been queued
*/ */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
int node, struct list_head *pagelist, bool migrate_all) int node, struct list_head *pagelist, bool migrate_all)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long addr;
struct page *page; struct page *page;
int err; int err;
bool isolated; bool isolated;
mmap_read_lock(mm); mmap_read_lock(mm);
addr = (unsigned long)untagged_addr_remote(mm, p);
err = -EFAULT; err = -EFAULT;
vma = vma_lookup(mm, addr); vma = vma_lookup(mm, addr);
if (!vma || !vma_migratable(vma)) if (!vma || !vma_migratable(vma))
...@@ -2211,7 +2214,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, ...@@ -2211,7 +2214,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
for (i = start = 0; i < nr_pages; i++) { for (i = start = 0; i < nr_pages; i++) {
const void __user *p; const void __user *p;
unsigned long addr;
int node; int node;
err = -EFAULT; err = -EFAULT;
...@@ -2219,7 +2221,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, ...@@ -2219,7 +2221,6 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
goto out_flush; goto out_flush;
if (get_user(node, nodes + i)) if (get_user(node, nodes + i))
goto out_flush; goto out_flush;
addr = (unsigned long)untagged_addr(p);
err = -ENODEV; err = -ENODEV;
if (node < 0 || node >= MAX_NUMNODES) if (node < 0 || node >= MAX_NUMNODES)
...@@ -2247,8 +2248,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, ...@@ -2247,8 +2248,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
* Errors in the page lookup or isolation are not fatal and we simply * Errors in the page lookup or isolation are not fatal and we simply
* report them via status * report them via status
*/ */
err = add_page_for_migration(mm, addr, current_node, err = add_page_for_migration(mm, p, current_node, &pagelist,
&pagelist, flags & MPOL_MF_MOVE_ALL); flags & MPOL_MF_MOVE_ALL);
if (err > 0) { if (err > 0) {
/* The page is successfully queued for migration */ /* The page is successfully queued for migration */
......