Commit e006222b authored by Vasily Gorbik, committed by Martin Schwidefsky

s390/mm: optimize debugfs ptdump kasan zero page walking

Kasan zero p4d/pud/pmd/pte are always filled in with the corresponding
kasan zero entries. Walking a kasan zero page backed area is time
consuming and unnecessary. When a kasan zero p4d/pud/pmd is encountered,
it ultimately points to the kasan zero page, always with the same
attributes and nothing else, so the zero p4d/pud/pmd can simply be
jumped over.

Also add a space between the address range and the number of pages to
separate them from each other when the number of pages is huge.

0x0018000000000000-0x0018000010000000       256M PMD RW X
0x0018000010000000-0x001bfffff0000000 1073741312M PTE RO X
0x001bfffff0000000-0x001bfffff0001000         4K PTE RW X
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 5dff0381
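
The idea behind the new walk_*_level() checks can be sketched outside the kernel. The stand-alone C model below is only an illustration under assumed names: zero_table stands in for kasan_zero_pmd/pud/p4d, walk_dir() for the walk_*_level() helpers, note_range() for note_page(), and ENTRIES_PER_DIR/ENTRY_SIZE are made-up constants. When a directory entry is found to point at the shared zero table, the whole range it covers is reported at once instead of being walked entry by entry.

/*
 * Stand-alone model of the skip, not kernel code.  All names here are
 * hypothetical: zero_table ~ kasan_zero_pmd/pud/p4d, walk_dir() ~
 * walk_*_level(), note_range() ~ note_page().
 */
#include <stdio.h>

#define ENTRIES_PER_DIR 2048UL          /* e.g. PTRS_PER_PMD on s390 */
#define ENTRY_SIZE      (1UL << 20)     /* address range per entry (made up) */

static unsigned long zero_table[ENTRIES_PER_DIR];  /* shared backing table */

static void note_range(unsigned long start, unsigned long end)
{
        printf("0x%016lx-0x%016lx\n", start, end);
}

static void walk_dir(const unsigned long *dir, unsigned long addr)
{
        if (dir == zero_table) {
                /* The whole directory is backed by the shared zero table:
                 * report one aggregate range and skip the per-entry walk. */
                note_range(addr, addr + ENTRIES_PER_DIR * ENTRY_SIZE);
                return;
        }
        for (unsigned long i = 0; i < ENTRIES_PER_DIR; i++)
                note_range(addr + i * ENTRY_SIZE, addr + (i + 1) * ENTRY_SIZE);
}

int main(void)
{
        /* A directory that is the shared zero table is handled in O(1)
         * instead of walking all 2048 entries. */
        walk_dir(zero_table, 0UL);
        return 0;
}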
@@ -3,6 +3,7 @@
 #include <linux/debugfs.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/kasan.h>
 #include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -89,7 +90,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 	} else if (prot != cur || level != st->level ||
 		   st->current_address >= st->marker[1].start_address) {
 		/* Print the actual finished series */
-		seq_printf(m, "0x%0*lx-0x%0*lx",
+		seq_printf(m, "0x%0*lx-0x%0*lx ",
 			   width, st->start_address,
 			   width, st->current_address);
 		delta = (st->current_address - st->start_address) >> 10;
@@ -109,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 	}
 }
 
+#ifdef CONFIG_KASAN
+static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+{
+	unsigned int prot;
+
+	prot = pte_val(*kasan_zero_pte) &
+			(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+	note_page(m, st, prot, 4);
+}
+#endif
+
 /*
  * The actual page table walker functions. In order to keep the
  * implementation of print_prot() short, we only check and pass
@@ -141,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	pmd_t *pmd;
 	int i;
 
+#ifdef CONFIG_KASAN
+	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
+		note_kasan_zero_page(m, st);
+		return;
+	}
+#endif
+
 	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
 		st->current_address = addr;
 		pmd = pmd_offset(pud, addr);
@@ -165,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 	pud_t *pud;
 	int i;
 
+#ifdef CONFIG_KASAN
+	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
+		note_kasan_zero_page(m, st);
+		return;
+	}
+#endif
+
 	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
 		st->current_address = addr;
 		pud = pud_offset(p4d, addr);
@@ -188,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
 	p4d_t *p4d;
 	int i;
 
+#ifdef CONFIG_KASAN
+	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
+		note_kasan_zero_page(m, st);
+		return;
+	}
+#endif
+
 	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
 		st->current_address = addr;
 		p4d = p4d_offset(pgd, addr);