Commit cd7f176a authored by Suren Baghdasaryan, committed by Andrew Morton

arm64/mm: try VMA lock-based page fault handling first

Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

Link: https://lkml.kernel.org/r/20230227173632.3292573-31-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0bff0aae
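For readers skimming the hunks below: the fault.c change tries to resolve a user fault while holding only the faulting VMA's read lock (taken under RCU), and falls through to the existing mmap_lock path whenever the VMA cannot be locked, the access is not permitted by that VMA, or the handler asks for a retry. The following is a minimal, stand-alone sketch of that control flow, not kernel code: the stub helpers try_lock_vma(), unlock_vma() and fault_one() are hypothetical placeholders for the real lock_vma_under_rcu(), vma_end_read() and handle_mm_fault(), and the signal quick-exit and per-event counters are omitted.

#include <stdio.h>

struct vma { unsigned long vm_flags; };

#define FAULT_FLAG_USER		0x01
#define VM_FAULT_RETRY		0x02

/* Stand-in for lock_vma_under_rcu(): look up and read-lock the VMA, or return NULL. */
static struct vma *try_lock_vma(unsigned long addr)
{
	static struct vma v = { .vm_flags = 0x01 };
	(void)addr;
	return &v;
}

/* Stand-in for vma_end_read(). */
static void unlock_vma(struct vma *vma)
{
	(void)vma;
}

/* Stand-in for handle_mm_fault(); returning VM_FAULT_RETRY forces the fallback. */
static unsigned int fault_one(struct vma *vma, unsigned long addr)
{
	(void)vma;
	(void)addr;
	return VM_FAULT_RETRY;
}

static int sketch_page_fault(unsigned long addr, unsigned int mm_flags,
			     unsigned long vm_flags)
{
	struct vma *vma;
	unsigned int fault;

	/* Only user-mode faults take the per-VMA fast path. */
	if (!(mm_flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = try_lock_vma(addr);
	if (!vma)
		goto lock_mmap;

	/* The VMA does not permit this access: let the slow path report it. */
	if (!(vma->vm_flags & vm_flags)) {
		unlock_vma(vma);
		goto lock_mmap;
	}

	fault = fault_one(vma, addr);
	unlock_vma(vma);

	/* Handled without ever touching mmap_lock. */
	if (!(fault & VM_FAULT_RETRY))
		return 0;

lock_mmap:
	/* Existing slow path: take mmap_lock and redo the fault. */
	printf("fallback to mmap_lock path for address %#lx\n", addr);
	return 0;
}

int main(void)
{
	sketch_page_fault(0x1000, FAULT_FLAG_USER, 0x01);
	return 0;
}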
arch/arm64/Kconfig
@@ -95,6 +95,7 @@ config ARM64
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
arch/arm64/mm/fault.c
@@ -535,6 +535,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned long vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
+#ifdef CONFIG_PER_VMA_LOCK
+	struct vm_area_struct *vma;
+#endif
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -585,6 +588,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+#ifdef CONFIG_PER_VMA_LOCK
+	if (!(mm_flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, addr);
+	if (!vma)
+		goto lock_mmap;
+
+	if (!(vma->vm_flags & vm_flags)) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+	fault = handle_mm_fault(vma, addr & PAGE_MASK,
+				mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+	vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return 0;
+	}
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -628,6 +661,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 	mmap_read_unlock(mm);
 
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */