Commit bf2b4af2 authored by Alexander Gordeev, committed by Heiko Carstens

s390/kasan: use set_pXe_bit() for pgtable entries setup

Convert the setup of pgtable entries to use the set_pXe_bit()
helpers, which are the preferred way of doing this in MM code.

Locally introduce a pgprot_clear_bit() helper which is, strictly
speaking, a generic function. However, only the x86
pgprot_clear_protnone_bits() helper does a similar thing, so do
not make it public.
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent e148071b
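For illustration, below is a minimal, self-contained sketch of the idiom this patch converts to: build the protection value first, clearing unwanted bits with pgprot_clear_bit(), then compose the entry from the physical address with a set_pXe_bit()-style helper. The types, bit values and helpers here are simplified stand-ins for the real s390 definitions, not the actual kernel code.

#include <stdio.h>

/* simplified stand-ins for the arch types and accessors */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;

#define __pgprot(x)	((pgprot_t) { (x) })
#define pgprot_val(x)	((x).pgprot)
#define __pte(x)	((pte_t) { (x) })
#define pte_val(x)	((x).pte)

#define _PAGE_NOEXEC	0x100UL				/* placeholder bit, not the real value */
#define PAGE_KERNEL	__pgprot(0x003UL | _PAGE_NOEXEC)	/* placeholder protection */

/* the locally introduced helper: drop one bit from a pgprot value */
static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
{
	return __pgprot(pgprot_val(pgprot) & ~bit);
}

/* stand-in for the arch set_pte_bit() helper: OR protection bits into a pte */
static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

int main(void)
{
	unsigned long pa = 0x1000UL;	/* pretend physical address */
	pgprot_t pgt_prot = PAGE_KERNEL;
	pte_t pte;

	/* old style (open-coded): pte entry built as __pte(pa | prot_bits) */

	/* new style: clear the bit in the pgprot, then build the entry step by step */
	pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
	pte = __pte(pa);
	pte = set_pte_bit(pte, pgt_prot);

	printf("pte value: 0x%lx\n", pte_val(pte));
	return 0;
}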
@@ -86,25 +86,32 @@ enum populate_mode {
 	POPULATE_ZERO_SHADOW,
 	POPULATE_SHALLOW
 };
 
+static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
+{
+	return __pgprot(pgprot_val(pgprot) & ~bit);
+}
+
 static void __init kasan_early_pgtable_populate(unsigned long address,
 						unsigned long end,
 						enum populate_mode mode)
 {
-	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
+	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
+	pgprot_t pgt_prot = PAGE_KERNEL;
+	pgprot_t sgt_prot = SEGMENT_KERNEL;
 	pgd_t *pg_dir;
 	p4d_t *p4_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
+	pmd_t pmd;
+	pte_t pte;
 
-	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
 	if (!has_nx)
-		pgt_prot_zero &= ~_PAGE_NOEXEC;
-	pgt_prot = pgprot_val(PAGE_KERNEL);
-	sgt_prot = pgprot_val(SEGMENT_KERNEL);
+		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
 	if (!has_nx || mode == POPULATE_ONE2ONE) {
-		pgt_prot &= ~_PAGE_NOEXEC;
-		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
+		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
+		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
 	}
 
 	/*
@@ -175,7 +182,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 				page = kasan_early_alloc_segment();
 				memset(page, 0, _SEGMENT_SIZE);
 			}
-			set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
+			pmd = __pmd(__pa(page));
+			pmd = set_pmd_bit(pmd, sgt_prot);
+			set_pmd(pm_dir, pmd);
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
@@ -194,16 +203,22 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 		switch (mode) {
 		case POPULATE_ONE2ONE:
 			page = (void *)address;
-			set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
+			pte = __pte(__pa(page));
+			pte = set_pte_bit(pte, pgt_prot);
+			set_pte(pt_dir, pte);
 			break;
 		case POPULATE_MAP:
 			page = kasan_early_alloc_pages(0);
 			memset(page, 0, PAGE_SIZE);
-			set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
+			pte = __pte(__pa(page));
+			pte = set_pte_bit(pte, pgt_prot);
+			set_pte(pt_dir, pte);
 			break;
 		case POPULATE_ZERO_SHADOW:
 			page = kasan_early_shadow_page;
-			set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
+			pte = __pte(__pa(page));
+			pte = set_pte_bit(pte, pgt_prot_zero);
+			set_pte(pt_dir, pte);
 			break;
 		case POPULATE_SHALLOW:
 			/* should never happen */
...