Commit 1404d6f1 authored by Laura Abbott, committed by Catalin Marinas

arm64: dump: Add checking for writable and executable pages

Page mappings with full RWX permissions are a security risk. x86
has an option to walk the page tables and dump any bad pages.
(See e1a58320 ("x86/mm: Warn on W^X mappings")). Add a similar
implementation for arm64.
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
[catalin.marinas@arm.com: folded fix for KASan out of bounds from Mark Rutland]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent cfd69e95
@@ -42,6 +42,35 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_WX
+	bool "Warn on W+X mappings at boot"
+	select ARM64_PTDUMP_CORE
+	---help---
+	  Generate a warning if any W+X mappings are found at boot.
+
+	  This is useful for discovering cases where the kernel is leaving
+	  W+X mappings after applying NX, as such mappings are a security risk.
+	  This check also includes UXN, which should be set on all kernel
+	  mappings.
+
+	  Look for a message in dmesg output like this:
+
+	    arm64/mm: Checked W+X mappings: passed, no W+X pages found.
+
+	  or like this, if the check failed:
+
+	    arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
+
+	  Note that even if the check fails, your kernel is possibly
+	  still fine, as W+X mappings are not a security hole in
+	  themselves, what they do is that they make the exploitation
+	  of other unfixed kernel bugs easier.
+
+	  There is no runtime or memory usage effect of this option
+	  once the kernel has booted up - it's a one time check.
+
+	  If in doubt, say "Y".
+
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
 	depends on MODULES
@@ -42,5 +42,13 @@ static inline int ptdump_debugfs_register(struct ptdump_info *info,
 	return 0;
 }
 #endif
+void ptdump_check_wx(void);
 #endif /* CONFIG_ARM64_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx()	ptdump_check_wx()
+#else
+#define debug_checkwx()	do { } while (0)
+#endif
+
 #endif /* __ASM_PTDUMP_H */
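An aside on the stub above: when CONFIG_DEBUG_WX is off, debug_checkwx() expands to "do { } while (0)" rather than to nothing, which keeps the expansion a single statement that requires a trailing semicolon, so un-braced if/else call sites compile cleanly and without empty-body warnings. A minimal standalone sketch of the idiom follows; the names MY_DEBUG_HOOK and my_debug_hook are made up for illustration and are not part of this patch.

/* Illustrative only: a config-gated hook stubbed out with do { } while (0). */
#include <stdio.h>

#ifdef MY_DEBUG_HOOK				/* hypothetical config switch */
#define my_debug_hook()	printf("hook ran\n")
#else
#define my_debug_hook()	do { } while (0)	/* no-op, but still a single statement */
#endif

int main(void)
{
	int enabled = 1;

	if (enabled)
		my_debug_hook();	/* call site reads the same whether the hook is real or stubbed */
	else
		printf("hook disabled\n");

	return 0;
}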
@@ -74,6 +74,9 @@ struct pg_state {
 	unsigned long start_address;
 	unsigned level;
 	u64 current_prot;
+	bool check_wx;
+	unsigned long wx_pages;
+	unsigned long uxn_pages;
 };
 
 struct prot_bits {
@@ -202,6 +205,35 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
 	}
 }
 
+static void note_prot_uxn(struct pg_state *st, unsigned long addr)
+{
+	if (!st->check_wx)
+		return;
+
+	if ((st->current_prot & PTE_UXN) == PTE_UXN)
+		return;
+
+	WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
+		  (void *)st->start_address, (void *)st->start_address);
+
+	st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+	if (!st->check_wx)
+		return;
+	if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
+		return;
+	if ((st->current_prot & PTE_PXN) == PTE_PXN)
+		return;
+
+	WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
+		  (void *)st->start_address, (void *)st->start_address);
+
+	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
 static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 				u64 val)
 {
@@ -219,6 +251,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 		unsigned long delta;
 
 		if (st->current_prot) {
+			note_prot_uxn(st, addr);
+			note_prot_wx(st, addr);
 			pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
 				   st->start_address, addr);
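For reference, the checks above come down to a simple predicate on the page protection bits: a range is reported as W+X when it is neither read-only (PTE_RDONLY clear) nor kernel-execute-never (PTE_PXN clear), and is separately reported when user-execute-never (PTE_UXN) is not set. Below is a standalone sketch of that predicate; the bit positions are hard-coded stand-ins chosen to mirror the arm64 layout, not values taken from the kernel headers.

/* Sketch only: mirrors the W+X / UXN predicate with assumed bit positions. */
#include <stdint.h>
#include <stdio.h>

#define MY_PTE_RDONLY	(1ULL << 7)	/* assumed stand-in for PTE_RDONLY */
#define MY_PTE_PXN	(1ULL << 53)	/* assumed stand-in for PTE_PXN */
#define MY_PTE_UXN	(1ULL << 54)	/* assumed stand-in for PTE_UXN */

/* Writable (not read-only) and kernel-executable (PXN clear): flagged as W+X. */
static int is_wx(uint64_t prot)
{
	return !(prot & MY_PTE_RDONLY) && !(prot & MY_PTE_PXN);
}

/* Kernel mappings are also expected to carry UXN; report those that do not. */
static int missing_uxn(uint64_t prot)
{
	return !(prot & MY_PTE_UXN);
}

int main(void)
{
	uint64_t rw_nx = MY_PTE_PXN | MY_PTE_UXN;	/* writable, non-executable data: fine */
	uint64_t rwx   = MY_PTE_UXN;			/* writable and kernel-executable: flagged */
	uint64_t ro_x  = MY_PTE_RDONLY | MY_PTE_UXN;	/* read-only text: fine */

	printf("rw_nx: wx=%d missing_uxn=%d\n", is_wx(rw_nx), missing_uxn(rw_nx));
	printf("rwx:   wx=%d missing_uxn=%d\n", is_wx(rwx), missing_uxn(rwx));
	printf("ro_x:  wx=%d missing_uxn=%d\n", is_wx(ro_x), missing_uxn(ro_x));
	return 0;
}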
@@ -344,6 +378,26 @@ static struct ptdump_info kernel_ptdump_info = {
 	.base_addr	= VA_START,
 };
 
+void ptdump_check_wx(void)
+{
+	struct pg_state st = {
+		.seq = NULL,
+		.marker = (struct addr_marker[]) {
+			{ 0, NULL},
+			{ -1, NULL},
+		},
+		.check_wx = true,
+	};
+
+	walk_pgd(&st, &init_mm, 0);
+	note_page(&st, 0, 0, 0);
+	if (st.wx_pages || st.uxn_pages)
+		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
+			st.wx_pages, st.uxn_pages);
+	else
+		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+
 static int ptdump_init(void)
 {
 	ptdump_initialize();
@@ -40,6 +40,7 @@
 #include <asm/tlb.h>
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
+#include <asm/ptdump.h>
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
@@ -438,6 +439,8 @@ void mark_rodata_ro(void)
 	/* flush the TLBs after updating live kernel mappings */
 	flush_tlb_all();
+
+	debug_checkwx();
 }
 
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,