Commit 13b3d13b authored by Michael Ellerman, committed by Benjamin Herrenschmidt

powerpc: Remove MMU_FTR_SLB

We now only support cpus that use an SLB, so we don't need an MMU
feature to indicate that.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 376af594
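
For context: mmu_has_feature() is essentially a bit test against the CPU's MMU feature mask, so once every supported CPU is known to have an SLB, every "if (mmu_has_feature(MMU_FTR_SLB))" guard in the hunks below is always true and can simply be dropped, and the feature bit itself can be retired. A minimal standalone sketch of that pattern (the names, values and the mmu_features variable here are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

/*
 * Illustrative sketch only: a feature check is a bit test against a mask of
 * MMU feature flags.  FTR_TLBIEL, FTR_16M_PAGE and the mmu_features variable
 * are placeholders for this example, not the kernel's definitions.
 */
#define FTR_TLBIEL	0x00000001ul
#define FTR_16M_PAGE	0x00000002ul

static unsigned long mmu_features = FTR_TLBIEL | FTR_16M_PAGE;

static int mmu_has_feature(unsigned long feature)
{
	return (mmu_features & feature) != 0;
}

int main(void)
{
	/*
	 * Before this commit, SLB setup was guarded, e.g.:
	 *	if (mmu_has_feature(MMU_FTR_SLB))
	 *		slb_initialize();
	 * With the SLB guaranteed on all supported CPUs the guard is dead
	 * weight; the hunks below make such calls unconditional.
	 */
	printf("tlbiel: %d\n", mmu_has_feature(FTR_TLBIEL));
	return 0;
}
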
@@ -195,8 +195,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
-#define MMU_FTR_PPCAS_ARCH_V2	(MMU_FTR_SLB | MMU_FTR_TLBIEL | \
-				 MMU_FTR_16M_PAGE)
+#define MMU_FTR_PPCAS_ARCH_V2	(MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
 /* We only set the altivec features if the kernel was compiled with altivec
  * support

@@ -64,9 +64,9 @@
  */
 #define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)
-/* MMU is SLB-based
+/* Doesn't support the B bit (1T segment) in SLBIE
  */
-#define MMU_FTR_SLB			ASM_CONST(0x02000000)
+#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)
 /* Support 16M large pages
  */
@@ -88,10 +88,6 @@
  */
 #define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)
-/* Doesn't support the B bit (1T segment) in SLBIE
- */
-#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x80000000)
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
 	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2

@@ -76,8 +76,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  * sub architectures.
  */
 #ifdef CONFIG_PPC_STD_MMU_64
-	if (mmu_has_feature(MMU_FTR_SLB))
-		switch_slb(tsk, next);
+	switch_slb(tsk, next);
 #else
 	/* Out of line for now */
 	switch_mmu_context(prev, next);

@@ -482,16 +482,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 	ld	r8,KSP(r4)	/* new stack pointer */
 #ifdef CONFIG_PPC_BOOK3S
 BEGIN_FTR_SECTION
-  BEGIN_FTR_SECTION_NESTED(95)
 	clrrdi	r6,r8,28	/* get its ESID */
 	clrrdi	r9,r1,28	/* get current sp ESID */
-  FTR_SECTION_ELSE_NESTED(95)
+FTR_SECTION_ELSE
 	clrrdi	r6,r8,40	/* get its 1T ESID */
 	clrrdi	r9,r1,40	/* get current sp 1T ESID */
-  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
-FTR_SECTION_ELSE
-	b	2f
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
 	clrldi.	r0,r6,2		/* is new ESID c00000000? */
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq

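In the assembly hunk above, the outer feature section that branched around the stack-segment check on non-SLB CPUs is gone; only the 256MB-vs-1T ESID selection remains. The retained code compares the effective segment ID (ESID) of the new and current kernel stack pointers, special-casing the first kernel segment, and that result decides whether the stack's SLB entry needs replacing. A rough C rendering of that check (a sketch with made-up function and variable names, not kernel code; the 28/40-bit masks mirror the clrrdi operands):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the retained logic: does switching to new_sp require installing
 * a new SLB entry for the kernel stack?  clrrdi clears the low bits of the
 * address in place, which is what the masking below does (28 bits for 256MB
 * segments, 40 bits for 1T segments).
 */
static bool stack_slb_update_needed(uint64_t new_sp, uint64_t cur_sp,
				    bool has_1t_segments)
{
	int shift = has_1t_segments ? 40 : 28;
	uint64_t mask = ~((1ull << shift) - 1);
	uint64_t new_esid = new_sp & mask;
	uint64_t cur_esid = cur_sp & mask;

	/*
	 * "is new ESID c00000000?": the first segment of the kernel linear
	 * mapping never needs a new SLB entry here, and neither does a stack
	 * that lives in the same segment as the current one.
	 */
	if ((new_esid << 2) == 0 || new_esid == cur_esid)
		return false;

	return true;
}

int main(void)
{
	/* Example: two stacks in the same 256MB segment need no update. */
	printf("%d\n", stack_slb_update_needed(0xc000000012340000ull,
					       0xc000000012350000ull, false));
	return 0;
}
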
@@ -1175,7 +1175,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 #ifdef CONFIG_PPC_STD_MMU_64
-	if (mmu_has_feature(MMU_FTR_SLB)) {
+	{
 		unsigned long sp_vsid;
 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

@@ -155,7 +155,6 @@ static struct ibm_pa_feature {
 } ibm_pa_features[] __initdata = {
 	{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
 	{0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
-	{0, MMU_FTR_SLB, 0, 0, 2, 0},
 	{CPU_FTR_CTRL, 0, 0, 0, 3, 0},
 	{CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
 	{CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},

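On the ibm_pa_features[] change: each row maps one byte/bit of the firmware's ibm,pa-features property onto kernel feature bits, so dropping the {0, MMU_FTR_SLB, 0, 0, 2, 0} row means byte 0, bit 2 of that property no longer sets any MMU feature. A hypothetical row type for illustration, matching the order of the initializers shown above (the kernel's own struct and field names may differ):

/*
 * Hypothetical stand-in for the table's row type, in the same field order as
 * the initializers above: which CPU, MMU and user-visible feature bits to
 * apply when a given byte/bit of the ibm,pa-features property is present.
 */
struct ibm_pa_feature_row {
	unsigned long cpu_features;	/* CPU_FTR_* bit to set */
	unsigned long mmu_features;	/* MMU_FTR_* bit to set */
	unsigned int  cpu_user_ftrs;	/* PPC_FEATURE_* (AT_HWCAP) bit to set */
	unsigned char pabyte;		/* byte index within the property */
	unsigned char pabit;		/* bit index within that byte */
	unsigned char invert;		/* non-zero: the bit being set clears the feature */
};
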
@@ -828,8 +828,7 @@ void __init early_init_mmu(void)
 	htab_initialize();
 	/* Initialize SLB management */
-	if (mmu_has_feature(MMU_FTR_SLB))
-		slb_initialize();
+	slb_initialize();
 }
 #ifdef CONFIG_SMP
@@ -840,8 +839,7 @@ void early_init_mmu_secondary(void)
 		mtspr(SPRN_SDR1, _SDR1);
 	/* Initialize SLB */
-	if (mmu_has_feature(MMU_FTR_SLB))
-		slb_initialize();
+	slb_initialize();
 }
 #endif /* CONFIG_SMP */

@@ -2690,7 +2690,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
 }
 #ifdef CONFIG_PPC_BOOK3S_64
-static void dump_slb(void)
+void dump_segments(void)
 {
 	int i;
 	unsigned long esid,vsid,valid;
@@ -2722,12 +2722,6 @@ static void dump_slb(void)
 		}
 	}
 }
-void dump_segments(void)
-{
-	if (mmu_has_feature(MMU_FTR_SLB))
-		dump_slb();
-}
 #endif
 #ifdef CONFIG_PPC_STD_MMU_32