Commit 5ad6e7ba authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Another fix for rodata=full. Since rodata= is not a simple boolean on
   arm64 (accepting 'full' as well), it got inadvertently broken by
   changes in the core code: if rodata=on is the default and rodata=off
   is passed on the kernel command line, rodata_full is never disabled

 - Fix a gcc warning about shifting 0xc0 into bits 31:24 without an
   explicit conversion to u32 (triggered by the AMPERE1 MIDR definition)

 - Include asm/ptrace.h in asm/syscall_wrapper.h to fix an incomplete
   struct pt_regs type causing the BPF verifier to refuse to load a
   tracing program which accesses pt_regs

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/syscall: Include asm/ptrace.h in syscall_wrapper header.
  arm64: Fix bit-shifting UB in the MIDR_CPU_MODEL() macro
  arm64: fix rodata=full again
parents b0b6e2c9 acfc35cf

arch/arm64/include/asm/cputype.h
@@ -41,7 +41,7 @@
 	(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
 #define MIDR_CPU_MODEL(imp, partnum) \
-	(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+	((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \
 	(0xf << MIDR_ARCHITECTURE_SHIFT) | \
 	((partnum) << MIDR_PARTNUM_SHIFT))
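
The hunk above matters because an integer constant such as 0xc0 has type
int, and left-shifting it by 24 moves a set bit into the sign bit, which
is undefined behaviour in C. A minimal userspace sketch of the problem
and of the fix (IMP_SHIFT and the uint32_t cast are stand-ins for the
kernel's MIDR_IMPLEMENTOR_SHIFT and _AT(u32, ...)):

    /*
     * Minimal sketch, not kernel code. Build with e.g.
     *   gcc -std=c11 -Wall -fsanitize=shift midr_shift.c
     * to have UBSan flag the commented-out line.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define IMP_SHIFT 24    /* stand-in for MIDR_IMPLEMENTOR_SHIFT */

    int main(void)
    {
        /* 0xc0 is a (signed) int; shifting it left by 24 sets the
         * sign bit -- undefined behaviour, and the warning gcc
         * emitted for the AMPERE1 MIDR definition: */
        /* uint32_t bad = 0xc0 << IMP_SHIFT; */

        /* Converting to an unsigned 32-bit type first, as the
         * kernel's _AT(u32, imp) now does, keeps the shift well
         * defined: */
        uint32_t good = (uint32_t)0xc0 << IMP_SHIFT;

        printf("0x%08x\n", good);    /* prints 0xc0000000 */
        return 0;
    }
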
arch/arm64/include/asm/syscall_wrapper.h
@@ -8,7 +8,7 @@
 #ifndef __ASM_SYSCALL_WRAPPER_H
 #define __ASM_SYSCALL_WRAPPER_H
 
-struct pt_regs;
+#include <asm/ptrace.h>
 
 #define SC_ARM64_REGS_TO_ARGS(x, ...) \
 	__MAP(x,__SC_ARGS \
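
Here the old forward declaration only told the compiler that struct
pt_regs exists, so the type stayed incomplete wherever only this header
was included, which is what left the BPF verifier unable to validate
tracing programs that access pt_regs. A sketch of the underlying C rule
(pt_regs_demo is a made-up stand-in, not the real layout from
asm/ptrace.h):

    /* Sketch of incomplete vs. complete types, not kernel code. */
    #include <stdio.h>

    struct pt_regs_demo;    /* forward declaration: incomplete type */

    /* With only the declaration above, pointer use is fine ... */
    static unsigned long peek_pc(struct pt_regs_demo *regs);

    /* ... but sizeof(struct pt_regs_demo) or regs->pc would not
     * compile until the full definition is visible -- the moral
     * equivalent of including asm/ptrace.h: */
    struct pt_regs_demo {
        unsigned long pc;
    };

    static unsigned long peek_pc(struct pt_regs_demo *regs)
    {
        return regs->pc;    /* needs the complete type */
    }

    int main(void)
    {
        struct pt_regs_demo r = { .pc = 0x1234 };
        printf("pc=0x%lx size=%zu\n", peek_pc(&r), sizeof r);
        return 0;
    }
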
arch/arm64/mm/pageattr.c
@@ -26,7 +26,7 @@ bool can_set_direct_map(void)
 	 * mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
 	 */
-	return rodata_full || debug_pagealloc_enabled() ||
+	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
 		IS_ENABLED(CONFIG_KFENCE);
 }
@@ -102,7 +102,8 @@ static int change_memory_common(unsigned long addr, int numpages,
 	 * If we are manipulating read-only permissions, apply the same
 	 * change to the linear mapping of the pages that back this VM area.
 	 */
-	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
-			    pgprot_val(clear_mask) == PTE_RDONLY)) {
+	if (rodata_enabled &&
+	    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+			    pgprot_val(clear_mask) == PTE_RDONLY)) {
 		for (i = 0; i < area->nr_pages; i++) {
 			__change_memory_common((u64)page_address(area->pages[i]),
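
Both pageattr.c hunks gate rodata_full on rodata_enabled. The failure
mode from the pull message can be modelled in userspace: when the build
defaults rodata_full to true, passing rodata=off only clears
rodata_enabled, so a check of rodata_full alone keeps acting as if
rodata were still on. A sketch (flag names mirror the kernel's; the
parsing is simplified and only the rodata term of can_set_direct_map()
is modelled):

    /*
     * Userspace model of the flag interaction, assuming a build
     * where rodata_full defaults to on.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool rodata_enabled = true;    /* rodata=on is the default */
    static bool rodata_full = true;       /* build-time default */

    static void parse_rodata(const char *arg)
    {
        rodata_enabled = strcmp(arg, "off") != 0;
        if (!strcmp(arg, "full"))
            rodata_full = true;
        /* note: "off" never clears rodata_full -- the root cause */
    }

    int main(void)
    {
        parse_rodata("off");

        /* pre-fix check: still 1 although rodata=off was passed */
        printf("rodata_full alone:       %d\n", rodata_full);

        /* fixed check from the hunks above: 0 as expected */
        printf("rodata_enabled && _full: %d\n",
               rodata_enabled && rodata_full);
        return 0;
    }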