Commit 98219dda authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm: Fold p4d page table layer at runtime

Change page table helpers to fold p4d at runtime.
The logic is the same as in <asm-generic/pgtable-nop4d.h>.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180214182542.69302-8-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6f9dd329
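
The diff below checks pgtable_l5_enabled in each helper so that, on hardware without 5-level paging, the p4d level collapses onto the pgd the same way <asm-generic/pgtable-nop4d.h> does at compile time. As a rough illustration of that pattern, here is a minimal, self-contained userspace sketch (not kernel code): l5_enabled, the simplified pgd_t/p4d_t structs, and the hard-coded shift/mask are stand-ins for pgtable_l5_enabled, the real page table types, and P4D_SHIFT/PTRS_PER_P4D.

/*
 * Minimal userspace model of runtime p4d folding -- a sketch, not the kernel
 * implementation. Only the control flow mirrors the patch and
 * <asm-generic/pgtable-nop4d.h>.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long p4d; } p4d_t;

/* Decided once at boot after probing CPU support for 5-level paging. */
static bool l5_enabled;

static int pgd_none(pgd_t pgd)
{
        /* Folded: the pgd entry is really the p4d entry, so it is never "none". */
        if (!l5_enabled)
                return 0;
        return pgd.pgd == 0;
}

static p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
        /* Folded: the p4d level aliases the pgd entry itself. */
        if (!l5_enabled)
                return (p4d_t *)pgd;
        /* 5-level case: index into the table the pgd entry points to
         * (simplified here by treating the entry as a plain virtual address). */
        return (p4d_t *)pgd->pgd + ((address >> 39) & 0x1ff);
}

int main(void)
{
        pgd_t pgd = { .pgd = 0 };

        l5_enabled = false;     /* pretend the CPU lacks 5-level paging */

        /* A generic walk falls through the folded level without special-casing it: */
        printf("pgd_none() = %d\n", pgd_none(pgd));
        printf("p4d entry aliases pgd entry: %s\n",
               (void *)p4d_offset(&pgd, 0xdeadbeefUL) == (void *)&pgd ? "yes" : "no");
        return 0;
}

Because the folded helpers keep the shape of the generic 5-level walk, callers of p4d_offset()/pgd_none() need no #ifdefs, and a single kernel image can run whether or not the hardware provides 5-level paging, which is the point of folding at runtime rather than at build time.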
@@ -569,13 +569,15 @@ static inline p4dval_t p4d_val(p4d_t p4d)
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-        pgdval_t val = native_pgd_val(pgd);
-
-        PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
+        if (pgtable_l5_enabled)
+                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+        else
+                set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd.pgd });
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
 {
-        set_pgd(pgdp, __pgd(0));
+        if (pgtable_l5_enabled)
+                set_pgd(pgdp, __pgd(0));
 }
...
@@ -167,6 +167,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
+        if (!pgtable_l5_enabled)
+                return;
         paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
         set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
 }
@@ -191,6 +193,7 @@ extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
                                   unsigned long address)
 {
-        ___p4d_free_tlb(tlb, p4d);
+        if (pgtable_l5_enabled)
+                ___p4d_free_tlb(tlb, p4d);
 }
...
@@ -65,7 +65,7 @@ extern pmdval_t early_pmd_flags;
 #ifndef __PAGETABLE_P4D_FOLDED
 #define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd)                  native_pgd_clear(pgd)
+#define pgd_clear(pgd)                  (pgtable_l5_enabled ? native_pgd_clear(pgd) : 0)
 #endif
 
 #ifndef set_p4d
@@ -859,6 +859,8 @@ static inline unsigned long p4d_index(unsigned long address)
 #if CONFIG_PGTABLE_LEVELS > 4
 static inline int pgd_present(pgd_t pgd)
 {
+        if (!pgtable_l5_enabled)
+                return 1;
         return pgd_flags(pgd) & _PAGE_PRESENT;
 }
@@ -876,6 +878,8 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 /* to find an entry in a page-table-directory. */
 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
+        if (!pgtable_l5_enabled)
+                return (p4d_t *)pgd;
         return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
 }
@@ -883,6 +887,9 @@ static inline int pgd_bad(pgd_t pgd)
 {
         unsigned long ignore_flags = _PAGE_USER;
 
+        if (!pgtable_l5_enabled)
+                return 0;
+
         if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
                 ignore_flags |= _PAGE_NX;
@@ -891,6 +898,8 @@ static inline int pgd_bad(pgd_t pgd)
 static inline int pgd_none(pgd_t pgd)
 {
+        if (!pgtable_l5_enabled)
+                return 0;
         /*
          * There is no need to do a workaround for the KNL stray
          * A/D bit erratum here. PGDs only point to page tables
...