Commit 5874cabe authored by Christophe Leroy, committed by Michael Ellerman

powerpc/64: only book3s/64 supports CONFIG_PPC_64K_PAGES

CONFIG_PPC_64K_PAGES cannot be selected by nohash/64.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a521c44c
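
Background for the cleanup below: among 64-bit platforms only Book3S can be built with a 64K base page, so nohash/64 always runs with a 4K base page and the unconditional 4-level layout that this patch hard-codes. The following standalone sketch (not kernel code; the index sizes are assumed values chosen for the illustration, not copied from the kernel headers) mirrors the now-unconditional VPTE_* arithmetic from the TLB miss code:

/*
 * Standalone sketch (assumed values, not kernel code): with the 4K base
 * page that is the only option on nohash/64, the four page-table levels
 * plus the page offset resolve a fixed number of virtual-address bits.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4K base page */
#define PTE_INDEX_SIZE	9	/* assumed: 512 PTE slots per page-table page */
#define PMD_INDEX_SIZE	7	/* assumed */
#define PUD_INDEX_SIZE	9	/* assumed */
#define PGD_INDEX_SIZE	9	/* assumed */

/* Mirrors the shape of the now-unconditional VPTE_* macros below. */
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE	(VPTE_PGD_SHIFT + PGD_INDEX_SIZE)

int main(void)
{
	/* Virtual-address bits covered by the four levels plus page offset. */
	int va_bits = PAGE_SHIFT + PTE_INDEX_SIZE + PMD_INDEX_SIZE +
		      PUD_INDEX_SIZE + PGD_INDEX_SIZE;

	printf("base page size: %d bytes\n", 1 << PAGE_SHIFT);
	printf("address bits mapped by 4 levels: %d\n", va_bits);
	printf("virtual PTE table index bits: %d\n", VPTE_INDEX_SIZE);
	return 0;
}

With these assumed sizes the program prints 46 mapped address bits and 34 virtual-PTE index bits; the authoritative values live in the nohash pgtable headers touched below.
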
...@@ -375,7 +375,6 @@ config ZONE_DMA ...@@ -375,7 +375,6 @@ config ZONE_DMA
config PGTABLE_LEVELS config PGTABLE_LEVELS
int int
default 2 if !PPC64 default 2 if !PPC64
default 3 if PPC_64K_PAGES && !PPC_BOOK3S_64
default 4 default 4
source "arch/powerpc/sysdev/Kconfig" source "arch/powerpc/sysdev/Kconfig"
......
@@ -171,12 +171,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 #define __pmd_free_tlb(tlb, pmd, addr) \
 	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
-#ifndef CONFIG_PPC_64K_PAGES
 #define __pud_free_tlb(tlb, pud, addr) \
 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
-#endif /* CONFIG_PPC_64K_PAGES */

 #define check_pgt_cache() do { } while (0)

 #endif /* _ASM_POWERPC_PGALLOC_64_H */
@@ -10,10 +10,6 @@
 #include <asm/barrier.h>
 #include <asm/asm-const.h>

-#ifdef CONFIG_PPC_64K_PAGES
-#error "Page size not supported"
-#endif
-
 #define FIRST_USER_ADDRESS 0UL

 /*
...
@@ -60,13 +60,8 @@
 #define _PAGE_SPECIAL _PAGE_SW0

 /* Base page size */
-#ifdef CONFIG_PPC_64K_PAGES
-#define _PAGE_PSIZE _PAGE_PSIZE_64K
-#define PTE_RPN_SHIFT (28)
-#else
 #define _PAGE_PSIZE _PAGE_PSIZE_4K
 #define PTE_RPN_SHIFT (24)
-#endif

 #define PTE_WIMGE_SHIFT (19)
 #define PTE_BAP_SHIFT (2)
...
@@ -33,11 +33,7 @@ static inline __be64 pmd_raw(pmd_t x)
 	return x.pmd;
 }

-/*
- * 64 bit hash always use 4 level table. Everybody else use 4 level
- * only for 4K page size.
- */
-#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+/* 64 bit always use 4 level table. */
 typedef struct { __be64 pud; } pud_t;
 #define __pud(x) ((pud_t) { cpu_to_be64(x) })
 #define __pud_raw(x) ((pud_t) { (x) })
@@ -51,7 +47,6 @@ static inline __be64 pud_raw(pud_t x)
 	return x.pud;
 }
-#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
 #endif /* CONFIG_PPC64 */

 /* PGD level */
@@ -77,7 +72,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  * With hash config 64k pages additionally define a bigger "real PTE" type that
  * gathers the "second half" part of the PTE for pseudo 64k pages
  */
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64)
+#ifdef CONFIG_PPC_64K_PAGES
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
 typedef struct { pte_t pte; } real_pte_t;
...
@@ -23,18 +23,13 @@ static inline unsigned long pmd_val(pmd_t x)
 	return x.pmd;
 }

-/*
- * 64 bit hash always use 4 level table. Everybody else use 4 level
- * only for 4K page size.
- */
-#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+/* 64 bit always use 4 level table. */
 typedef struct { unsigned long pud; } pud_t;
 #define __pud(x) ((pud_t) { (x) })
 static inline unsigned long pud_val(pud_t x)
 {
 	return x.pud;
 }
-#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
 #endif /* CONFIG_PPC64 */

 /* PGD level */
@@ -54,7 +49,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  * With hash config 64k pages additionally define a bigger "real PTE" type that
  * gathers the "second half" part of the PTE for pseudo 64k pages
  */
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64)
+#ifdef CONFIG_PPC_64K_PAGES
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
 #else
 typedef struct { pte_t pte; } real_pte_t;
...
@@ -20,7 +20,7 @@
 /*
  * For now 512TB is only supported with book3s and 64K linux page size.
  */
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
+#ifdef CONFIG_PPC_64K_PAGES
 /*
  * Max value currently used:
  */
...
@@ -433,11 +433,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
 	unsigned long rid = (address & rmask) | 0x1000000000000000ul;
 	unsigned long vpte = address & ~rmask;

-#ifdef CONFIG_PPC_64K_PAGES
-	vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
-#else
 	vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
-#endif
 	vpte |= rid;
 	__flush_tlb_page(tlb->mm, vpte, tsize, 0);
 }
@@ -625,21 +621,12 @@ static void early_init_this_mmu(void)
 	case PPC_HTW_IBM:
 		mas4 |= MAS4_INDD;
-#ifdef CONFIG_PPC_64K_PAGES
-		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
-		mmu_pte_psize = MMU_PAGE_256M;
-#else
 		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
 		mmu_pte_psize = MMU_PAGE_1M;
-#endif
 		break;

 	case PPC_HTW_NONE:
-#ifdef CONFIG_PPC_64K_PAGES
-		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
-#else
 		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
-#endif
 		mmu_pte_psize = mmu_virtual_psize;
 		break;
 	}
...
@@ -24,11 +24,7 @@
 #include <asm/kvm_booke_hv_asm.h>
 #include <asm/feature-fixups.h>

-#ifdef CONFIG_PPC_64K_PAGES
-#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE+1)
-#else
 #define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
-#endif
 #define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
 #define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
 #define VPTE_INDEX_SIZE	(VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
@@ -167,13 +163,11 @@ MMU_FTR_SECTION_ELSE
 	ldx	r14,r14,r15	/* grab pgd entry */
 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

-#ifndef CONFIG_PPC_64K_PAGES
 	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
 	cmpdi	cr0,r14,0
 	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
 	ldx	r14,r14,r15	/* grab pud entry */
-#endif /* CONFIG_PPC_64K_PAGES */

 	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
@@ -682,18 +676,7 @@ normal_tlb_miss:
	 * order to handle the weird page table format used by linux
	 */
 	ori	r10,r15,0x1
-#ifdef CONFIG_PPC_64K_PAGES
-	/* For the top bits, 16 bytes per PTE */
-	rldicl	r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
-	/* Now create the bottom bits as 0 in position 0x8000 and
-	 * the rest calculated for 8 bytes per PTE
-	 */
-	rldicl	r15,r16,64-(PAGE_SHIFT-3),64-15
-	/* Insert the bottom bits in */
-	rlwimi	r14,r15,0,16,31
-#else
 	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
-#endif
 	sldi	r15,r10,60
 	clrrdi	r14,r14,3
 	or	r10,r15,r14
@@ -732,11 +715,7 @@ finish_normal_tlb_miss:
 	/* Check page size, if not standard, update MAS1 */
 	rldicl	r11,r14,64-8,64-8
-#ifdef CONFIG_PPC_64K_PAGES
-	cmpldi	cr0,r11,BOOK3E_PAGESZ_64K
-#else
 	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
-#endif
 	beq-	1f
 	mfspr	r11,SPRN_MAS1
 	rlwimi	r11,r14,31,21,24
@@ -857,14 +836,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
 	cmpdi	cr0,r15,0
 	bge	virt_page_table_tlb_miss_fault

-#ifndef CONFIG_PPC_64K_PAGES
 	/* Get to PUD entry */
 	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
 	clrrdi	r10,r11,3
 	ldx	r15,r10,r15
 	cmpdi	cr0,r15,0
 	bge	virt_page_table_tlb_miss_fault
-#endif /* CONFIG_PPC_64K_PAGES */

 	/* Get to PMD entry */
 	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
@@ -1106,14 +1083,12 @@ htw_tlb_miss:
 	cmpdi	cr0,r15,0
 	bge	htw_tlb_miss_fault

-#ifndef CONFIG_PPC_64K_PAGES
 	/* Get to PUD entry */
 	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
 	clrrdi	r10,r11,3
 	ldx	r15,r10,r15
 	cmpdi	cr0,r15,0
 	bge	htw_tlb_miss_fault
-#endif /* CONFIG_PPC_64K_PAGES */

 	/* Get to PMD entry */
 	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
@@ -1132,9 +1107,7 @@ htw_tlb_miss:
	 * 4K page we need to extract a bit from the virtual address and
	 * insert it into the "PA52" bit of the RPN.
	 */
-#ifndef CONFIG_PPC_64K_PAGES
 	rlwimi	r15,r16,32-9,20,20
-#endif

 	/* Now we build the MAS:
	 *
	 * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
@@ -1144,11 +1117,7 @@ htw_tlb_miss:
	 * MAS 2 : Use defaults
	 * MAS 3+7 : Needs to be done
	 */
-#ifdef CONFIG_PPC_64K_PAGES
-	ori	r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
-#else
 	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-#endif
 BEGIN_MMU_FTR_SECTION
 	srdi	r16,r10,32
...