Commit 98233368 authored by Kirill A. Shutemov, committed by Linus Torvalds

x86: expose number of page table levels on Kconfig level

We would want to use the number of page table levels to define mm_struct.
Let's expose it as CONFIG_PGTABLE_LEVELS.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6b8ce2a1
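A minimal, hedged sketch of the idea behind the change: once Kconfig emits CONFIG_PGTABLE_LEVELS for every configuration, C code can branch on the value at preprocessor time, just like the PAGETABLE_LEVELS tests replaced in the hunks below. The demo_pgtable_levels() helper and the fallback define are hypothetical, userspace-only illustration, not part of this commit.

#include <stdio.h>

#ifndef CONFIG_PGTABLE_LEVELS
#define CONFIG_PGTABLE_LEVELS 4	/* assume an x86_64-style build for this demo */
#endif

static int demo_pgtable_levels(void)
{
#if CONFIG_PGTABLE_LEVELS == 4
	return 4;	/* pgd -> pud -> pmd -> pte */
#elif CONFIG_PGTABLE_LEVELS == 3
	return 3;	/* pgd -> pmd -> pte, i.e. X86_PAE */
#else
	return 2;	/* pgd -> pte, classic 32-bit non-PAE */
#endif
}

int main(void)
{
	printf("page table levels: %d\n", demo_pgtable_levels());
	return 0;
}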
@@ -277,6 +277,12 @@ config ARCH_SUPPORTS_UPROBES
 config FIX_EARLYCON_MEM
 	def_bool y
 
+config PGTABLE_LEVELS
+	int
+	default 4 if X86_64
+	default 3 if X86_PAE
+	default 2
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
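With the Kconfig hunk above applied, a 64-bit build would, for example, end up with CONFIG_PGTABLE_LEVELS=4 in .config and a matching #define in the generated autoconf.h (illustrative values; the result depends on the configuration), which is what lets the preprocessor tests in the remaining hunks use the symbol directly.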
@@ -545,7 +545,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 	PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
 	pmdval_t ret;
@@ -585,7 +585,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 	PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
 		    val);
 }
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
@@ -636,9 +636,9 @@ static inline void pud_clear(pud_t *pudp)
 	set_pud(pudp, __pud(0));
 }
 
-#endif	/* PAGETABLE_LEVELS == 4 */
-#endif	/* PAGETABLE_LEVELS >= 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
+#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
 
 #ifdef CONFIG_X86_PAE
 /* Special-case pte-setting operations for PAE, which can't update a
@@ -294,7 +294,7 @@ struct pv_mmu_ops {
 	struct paravirt_callee_save pgd_val;
 	struct paravirt_callee_save make_pgd;
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
 	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
@@ -308,13 +308,13 @@ struct pv_mmu_ops {
 	struct paravirt_callee_save pmd_val;
 	struct paravirt_callee_save make_pmd;
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 	struct paravirt_callee_save pud_val;
 	struct paravirt_callee_save make_pud;
 
 	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif	/* PAGETABLE_LEVELS == 4 */
-#endif	/* PAGETABLE_LEVELS >= 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
+#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
 
 	struct pv_lazy_ops lazy_mode;
@@ -77,7 +77,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
@@ -116,7 +116,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 }
 #endif	/* CONFIG_X86_PAE */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
 	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
@@ -142,7 +142,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 	___pud_free_tlb(tlb, pud);
 }
 
-#endif	/* PAGETABLE_LEVELS > 3 */
-#endif	/* PAGETABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
 #endif /* _ASM_X86_PGALLOC_H */
@@ -17,7 +17,6 @@ typedef union {
 #endif	/* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD	0
-#define PAGETABLE_LEVELS	2
 
 /*
  * traditional i386 two-level paging structure:
@@ -24,8 +24,6 @@ typedef union {
 #define SHARED_KERNEL_PMD	1
 #endif
 
-#define PAGETABLE_LEVELS	3
-
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
@@ -551,7 +551,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
 	return npg >> (20 - PAGE_SHIFT);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
 	return native_pud_val(pud) == 0;
@@ -594,9 +594,9 @@ static inline int pud_large(pud_t pud)
 {
 	return 0;
 }
-#endif	/* PAGETABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline int pgd_present(pgd_t pgd)
 {
 	return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -633,7 +633,7 @@ static inline int pgd_none(pgd_t pgd)
 {
 	return !native_pgd_val(pgd);
 }
-#endif	/* PAGETABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 #endif	/* __ASSEMBLY__ */
@@ -20,7 +20,6 @@ typedef struct { pteval_t pte; } pte_t;
 #endif	/* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD	0
-#define PAGETABLE_LEVELS	4
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
@@ -234,7 +234,7 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
 	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
 }
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 typedef struct { pudval_t pud; } pud_t;
 
 static inline pud_t native_make_pud(pmdval_t val)
@@ -255,7 +255,7 @@ static inline pudval_t native_pud_val(pud_t pud)
 }
 #endif
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 typedef struct { pmdval_t pmd; } pmd_t;
 
 static inline pmd_t native_make_pmd(pmdval_t val)
@@ -443,7 +443,7 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
 	.pte_clear = native_pte_clear,
@@ -454,13 +454,13 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.pmd_val = PTE_IDENT,
 	.make_pmd = PTE_IDENT,
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 	.pud_val = PTE_IDENT,
 	.make_pud = PTE_IDENT,
 
 	.set_pgd = native_set_pgd,
 #endif
-#endif	/* PAGETABLE_LEVELS >= 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
 
 	.pte_val = PTE_IDENT,
 	.pgd_val = PTE_IDENT,
@@ -58,7 +58,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 	tlb_remove_page(tlb, pte);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	struct page *page = virt_to_page(pmd);
@@ -74,14 +74,14 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 	tlb_remove_page(tlb, page);
 }
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
 	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pud));
 }
 
-#endif	/* PAGETABLE_LEVELS > 3 */
-#endif	/* PAGETABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
 static inline void pgd_list_add(pgd_t *pgd)
 {
@@ -117,9 +117,9 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
 	/* If the pgd points to a shared pagetable level (either the
 	   ptes in non-PAE, or shared PMD in PAE), then just copy the
 	   references from swapper_pg_dir. */
-	if (PAGETABLE_LEVELS == 2 ||
-	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
-	    PAGETABLE_LEVELS == 4) {
+	if (CONFIG_PGTABLE_LEVELS == 2 ||
+	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
+	    CONFIG_PGTABLE_LEVELS == 4) {
 		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 				KERNEL_PGD_PTRS);
@@ -502,7 +502,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 __visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
@@ -589,7 +589,7 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif	/* PAGETABLE_LEVELS == 4 */
+#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
 
 /*
  * (Yet another) pagetable walker.  This one is intended for pinning a
@@ -1628,7 +1628,7 @@ static void xen_release_pmd(unsigned long pfn)
 	xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2046,7 +2046,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 	pv_mmu_ops.set_pgd = xen_set_pgd;
 #endif
 
@@ -2056,7 +2056,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
 	pv_mmu_ops.release_pte = xen_release_pte;
 	pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
@@ -2122,14 +2122,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
 	.set_pgd = xen_set_pgd_hyper,
 
 	.alloc_pud = xen_alloc_pmd_init,
 	.release_pud = xen_release_pmd_init,
-#endif	/* PAGETABLE_LEVELS == 4 */
+#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
 
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
@@ -224,7 +224,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
 	    TP_printk("pmdp %p", __entry->pmdp)
 	);
 
-#if PAGETABLE_LEVELS >= 4
+#if CONFIG_PGTABLE_LEVELS >= 4
 
 TRACE_EVENT(xen_mmu_set_pud,
 	    TP_PROTO(pud_t *pudp, pud_t pudval),