Commit d5f17ee9 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/8xx: don't disable large TLBs with CONFIG_STRICT_KERNEL_RWX

This patch implements handling of STRICT_KERNEL_RWX with
large TLBs directly in the TLB miss handlers.

To do so, etext and sinittext are aligned on 512kB boundaries
and the miss handlers use 512kB pages instead of 8MB pages for
addresses close to the boundaries.

It sets RO PP flags for addresses under sinittext.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 0f4a9041
...@@ -735,6 +735,7 @@ config ETEXT_SHIFT ...@@ -735,6 +735,7 @@ config ETEXT_SHIFT
int "_etext shift" if ETEXT_SHIFT_BOOL int "_etext shift" if ETEXT_SHIFT_BOOL
range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 19 if STRICT_KERNEL_RWX && PPC_8xx
default PPC_PAGE_SHIFT default PPC_PAGE_SHIFT
help help
On Book3S 32 (603+), IBATs are used to map kernel text. On Book3S 32 (603+), IBATs are used to map kernel text.
...@@ -755,6 +756,7 @@ config DATA_SHIFT ...@@ -755,6 +756,7 @@ config DATA_SHIFT
default 24 if STRICT_KERNEL_RWX && PPC64 default 24 if STRICT_KERNEL_RWX && PPC64
range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
default 19 if STRICT_KERNEL_RWX && PPC_8xx
default PPC_PAGE_SHIFT default PPC_PAGE_SHIFT
help help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
......
...@@ -231,9 +231,10 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) ...@@ -231,9 +231,10 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
} }
/* patch sites */ /* patch sites */
extern s32 patch__itlbmiss_linmem_top; extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp; extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
extern s32 patch__fixupdar_linmem_top; extern s32 patch__fixupdar_linmem_top;
extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2; extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3; extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
......
...@@ -292,6 +292,17 @@ SystemCall: ...@@ -292,6 +292,17 @@ SystemCall:
*/ */
EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
#ifdef CONFIG_PERF_EVENTS
patch_site 0f, patch__dtlbmiss_perf
0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
addi r10, r10, 1
stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
rfi
#endif
. = 0x1100 . = 0x1100
/* /*
* For the MPC8xx, this is a software tablewalk to load the instruction * For the MPC8xx, this is a software tablewalk to load the instruction
...@@ -405,10 +416,20 @@ InstructionTLBMiss: ...@@ -405,10 +416,20 @@ InstructionTLBMiss:
#ifndef CONFIG_PIN_TLB_TEXT #ifndef CONFIG_PIN_TLB_TEXT
ITLBMissLinear: ITLBMissLinear:
mtcr r11 mtcr r11
#ifdef CONFIG_STRICT_KERNEL_RWX
patch_site 0f, patch__itlbmiss_linmem_top8
mfspr r10, SPRN_SRR0
0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
ori r11, r11, MI_PS512K | MI_SVALID
rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
#else
/* Set 8M byte page and mark it valid */ /* Set 8M byte page and mark it valid */
li r11, MI_PS8MEG | MI_SVALID li r11, MI_PS8MEG | MI_SVALID
mtspr SPRN_MI_TWC, r11
rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
#endif
mtspr SPRN_MI_TWC, r11
ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT _PAGE_PRESENT
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
...@@ -494,16 +515,6 @@ DataStoreTLBMiss: ...@@ -494,16 +515,6 @@ DataStoreTLBMiss:
rfi rfi
patch_site 0b, patch__dtlbmiss_exit_1 patch_site 0b, patch__dtlbmiss_exit_1
#ifdef CONFIG_PERF_EVENTS
patch_site 0f, patch__dtlbmiss_perf
0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
addi r10, r10, 1
stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
rfi
#endif
DTLBMissIMMR: DTLBMissIMMR:
mtcr r11 mtcr r11
/* Set 512k byte guarded page and mark it valid */ /* Set 512k byte guarded page and mark it valid */
...@@ -525,10 +536,29 @@ DTLBMissIMMR: ...@@ -525,10 +536,29 @@ DTLBMissIMMR:
DTLBMissLinear: DTLBMissLinear:
mtcr r11 mtcr r11
rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
#ifdef CONFIG_STRICT_KERNEL_RWX
patch_site 0f, patch__dtlbmiss_romem_top8
0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
rlwinm r11, r11, 0, 0xff800000
neg r10, r11
or r11, r11, r10
rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
ori r11, r11, MI_PS512K | MI_SVALID
mfspr r10, SPRN_MD_EPN
rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
#else
/* Set 8M byte page and mark it valid */ /* Set 8M byte page and mark it valid */
li r11, MD_PS8MEG | MD_SVALID li r11, MD_PS8MEG | MD_SVALID
#endif
mtspr SPRN_MD_TWC, r11 mtspr SPRN_MD_TWC, r11
rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ #ifdef CONFIG_STRICT_KERNEL_RWX
patch_site 0f, patch__dtlbmiss_romem_top
0: subis r11, r10, 0
rlwimi r10, r11, 11, _PAGE_RO
#endif
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT _PAGE_PRESENT
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
......
...@@ -94,11 +94,20 @@ static void __init mmu_mapin_immr(void) ...@@ -94,11 +94,20 @@ static void __init mmu_mapin_immr(void)
map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
} }
static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped) static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{ {
modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16); modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
} }
/*
 * Rewrite the 16-bit immediate field of the instruction at a patch site.
 *
 * Reads the existing instruction at @site, keeps its upper 16 bits
 * (opcode and register operands) and replaces the low 16 bits with the
 * high half of @simm, then writes the result back via the code-patching
 * API. The name suggests the target is an addis-class instruction whose
 * immediate is shifted left by 16 — NOTE(review): callers pass negated
 * addresses so the patched addis performs a compare-against-limit via
 * subtraction; confirm against the TLB miss handler asm.
 */
static void mmu_patch_addis(s32 *site, long simm)
{
unsigned int instr = *(unsigned int *)patch_site_addr(site);

/* Preserve opcode/register fields, substitute the immediate. */
instr &= 0xffff0000;
instr |= ((unsigned long)simm) >> 16;
patch_instruction_site(site, instr);
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{ {
unsigned long mapped; unsigned long mapped;
...@@ -135,6 +144,26 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) ...@@ -135,6 +144,26 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return mapped; return mapped;
} }
/*
 * Mark init-time text as non-executable by re-patching the ITLB miss
 * handler boundaries once initmem is freed.
 *
 * - With STRICT_KERNEL_RWX and an _etext alignment finer than 8MB
 *   (CONFIG_ETEXT_SHIFT < 23), patch the "linmem_top8" addis site with
 *   the negated 8MB-rounded-down address of _etext, so the handler can
 *   detect addresses in the partial last 8MB block and use 512kB pages.
 * - Unless kernel text is pinned in the TLB (CONFIG_PIN_TLB_TEXT),
 *   patch the 16-bit compare-limit site with the physical address of
 *   _etext so executable mappings stop there.
 */
void mmu_mark_initmem_nx(void)
{
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
mmu_patch_addis(&patch__itlbmiss_linmem_top8,
-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
}
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Make kernel text/rodata read-only by re-patching the DTLB miss handler
 * boundary sites; everything below _sinittext gets RO page protection.
 *
 * - If the data alignment is finer than 8MB (CONFIG_DATA_SHIFT < 23),
 *   patch the "romem_top8" addis site with the negated 8MB-aligned
 *   physical address of _sinittext so the partial boundary block is
 *   mapped with 512kB pages.
 * - Always patch "romem_top" with the negated physical address of
 *   _sinittext; the handler uses the subtraction result to set the
 *   _PAGE_RO bit for addresses below it.
 */
void mmu_mark_rodata_ro(void)
{
if (CONFIG_DATA_SHIFT < 23)
mmu_patch_addis(&patch__dtlbmiss_romem_top8,
-__pa(((unsigned long)_sinittext) &
~(LARGE_PAGE_SIZE_8M - 1)));
mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
}
#endif
void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size) phys_addr_t first_memblock_size)
{ {
......
...@@ -108,7 +108,7 @@ static void __init MMU_setup(void) ...@@ -108,7 +108,7 @@ static void __init MMU_setup(void)
__map_without_bats = 1; __map_without_bats = 1;
__map_without_ltlbs = 1; __map_without_ltlbs = 1;
} }
if (strict_kernel_rwx_enabled()) if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx))
__map_without_ltlbs = 1; __map_without_ltlbs = 1;
} }
......
...@@ -166,7 +166,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } ...@@ -166,7 +166,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif #endif
#if defined(CONFIG_PPC_BOOK3S_32) #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
void mmu_mark_initmem_nx(void); void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void); void mmu_mark_rodata_ro(void);
#else #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment