Commit df48ab3c authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Nothing particularly interesting here, but all important fixes
  nonetheless:

   - Add missing PAN toggling in the futex code

   - Fix missing #include that briefly caused issues in -next

   - Allow changing of vmalloc permissions with set_memory_* (used by
     bpf)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: asm: Explicitly include linux/personality.h in asm/page.h
  arm64: futex.h: Add missing PAN toggling
  arm64: allow vmalloc regions to be set with set_memory_*
parents 5787c252 3e275c64
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -121,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -137,6 +138,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "	.align	3\n"
 "	.quad	1b, 4b, 2b, 4b\n"
 "	.popsection\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");
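The pattern restored above is a balanced bracket: with CONFIG_ARM64_PAN, every user-access window must clear PSTATE.PAN on entry and set it again on exit, and the cmpxchg path was the one futex helper missing the pair. As a loose standalone illustration only (plain userspace C; pan_disable(), pan_enable() and demo_cmpxchg() are made-up stand-ins for the ALTERNATIVE-patched SET_PSTATE_PAN instructions, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static int pan_depth;				/* stand-in for PSTATE.PAN state */

static void pan_disable(void) { pan_depth++; }	/* cf. SET_PSTATE_PAN(0) */
static void pan_enable(void)  { pan_depth--; }	/* cf. SET_PSTATE_PAN(1) */

/* returns the value found at *uaddr, like the kernel helper's *uval */
static unsigned int demo_cmpxchg(atomic_uint *uaddr,
				 unsigned int oldval, unsigned int newval)
{
	unsigned int expected = oldval;

	pan_disable();	/* the missing half: open the user-access window */
	atomic_compare_exchange_strong(uaddr, &expected, newval);
	pan_enable();	/* ...and close it again before returning */

	return expected;
}

int main(void)
{
	atomic_uint futex_word = 42;
	unsigned int old = demo_cmpxchg(&futex_word, 42, 7);

	printf("old=%u new=%u balanced=%s\n",
	       old, (unsigned int)futex_word, pan_depth == 0 ? "yes" : "no");
	return 0;
}

The point of the demo is the bracketing, not the atomics: whichever way the compare-exchange goes, the window opened before the user access is closed before the function returns.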
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -39,6 +39,7 @@
 #ifndef __ASSEMBLY__
 
+#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
 #include <asm/pgtable-types.h>
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
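The added #include fixes a header self-containment problem: asm/page.h uses READ_IMPLIES_EXEC, which is defined in linux/personality.h, so a translation unit that included asm/page.h without having pulled in personality.h first would fail to build (the -next breakage mentioned above). A compilable toy version of the same failure mode, with invented demo names:

#include <stdio.h>

/* what a "personality_demo.h" would provide */
#define PER_READ_IMPLIES_EXEC	0x0400000
#define read_implies_exec(pers)	(((pers) & PER_READ_IMPLIES_EXEC) != 0)

/*
 * What a "page_demo.h" would provide. Because the macro below expands
 * read_implies_exec(), page_demo.h must include personality_demo.h
 * itself; otherwise any .c file that includes page_demo.h alone breaks,
 * depending silently on include order.
 */
#define vm_data_default_flags(pers) \
	(0x3u | (read_implies_exec(pers) ? 0x4u : 0u))

int main(void)
{
	printf("%#x\n", vm_data_default_flags(PER_READ_IMPLIES_EXEC)); /* 0x7 */
	printf("%#x\n", vm_data_default_flags(0));                     /* 0x3 */
	return 0;
}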
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -44,6 +45,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long end = start + size;
 	int ret;
 	struct page_change_data data;
+	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -51,10 +53,23 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || end >= MODULES_END)
+	/*
+	 * Kernel VA mappings are always live, and splitting live section
+	 * mappings into page mappings may cause TLB conflicts. This means
+	 * we have to ensure that changing the permission bits of the range
+	 * we are operating on does not result in such splitting.
+	 *
+	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+	 * Those are guaranteed to consist entirely of page mappings, and
+	 * splitting is never needed.
+	 *
+	 * So check whether the [addr, addr + size) interval is entirely
+	 * covered by precisely one VM area that has the VM_ALLOC flag set.
+	 */
+	area = find_vm_area((void *)addr);
+	if (!area ||
+	    end > (unsigned long)area->addr + area->size ||
+	    !(area->flags & VM_ALLOC))
 		return -EINVAL;
 
 	if (!numpages)
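The new test reads in isolation: the permission change is allowed only when the whole [addr, addr + size) interval is covered by a single area that vmalloc or vmap created, so no live section mapping ever needs splitting. A standalone sketch of just that check, assuming simplified stand-ins for struct vm_struct and find_vm_area() (none of the *_demo names exist in the kernel):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VM_ALLOC_DEMO	0x2	/* stand-in for the kernel's VM_ALLOC flag */

struct vm_area_demo {
	uintptr_t addr;
	size_t size;
	unsigned long flags;
};

/* stand-in for find_vm_area(): one hard-coded vmalloc region */
static const struct vm_area_demo *find_area_demo(uintptr_t addr)
{
	static const struct vm_area_demo vmalloc_region = {
		.addr = 0x1000, .size = 0x4000, .flags = VM_ALLOC_DEMO,
	};

	if (addr >= vmalloc_region.addr &&
	    addr < vmalloc_region.addr + vmalloc_region.size)
		return &vmalloc_region;
	return NULL;
}

static bool change_allowed(uintptr_t addr, size_t size)
{
	const struct vm_area_demo *area = find_area_demo(addr);
	uintptr_t end = addr + size;

	/* mirrors the three-way test added to change_memory_common() */
	return area &&
	       end <= area->addr + area->size &&
	       (area->flags & VM_ALLOC_DEMO);
}

int main(void)
{
	printf("inside one area:  %d\n", change_allowed(0x1000, 0x2000)); /* 1 */
	printf("spills past end:  %d\n", change_allowed(0x3000, 0x3000)); /* 0 */
	printf("unmapped address: %d\n", change_allowed(0x9000, 0x1000)); /* 0 */
	return 0;
}

In the kernel, set_memory_ro() and friends reach this check through change_memory_common(), which is what lets bpf safely make its vmalloc'd images read-only.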