Commit f342adca authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32s: Prepare Kernel Userspace Access Protection

This patch prepares Kernel Userspace Access Protection for
book3s/32.

Due to limitations of the processor's page protection capabilities,
the protection is only against writing. Read protection cannot be
achieved using page protection.

book3s/32 provides the following values for PP bits:

PP00 provides RW for Key 0 and NA for Key 1
PP01 provides RW for Key 0 and RO for Key 1
PP10 provides RW for all
PP11 provides RO for all

Today PP10 is used for user RW pages and PP11 for user RO pages, and
the user segment registers' Kp and Ks are both set to 1. This patch
modifies page protection to use PP01 for user RW pages, and sets the
user segment registers to Kp 0 and Ks 0.

This will allow us to set up Userspace write access protection by
setting Ks to 1 in the following patch.

Kernel space segment registers remain unchanged.
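
To make the table above concrete, here is a minimal C sketch (an
illustration, not code from this patch) of the resulting access
rights. The key used for an access comes from the segment register:
Ks for supervisor-mode accesses, Kp for user-mode accesses; the PP
bits then select the access level.

    enum access { NA, RO, RW };

    /* Access allowed for a given protection key and PP value (table above). */
    static enum access pp_access(unsigned int key, unsigned int pp)
    {
            if (key == 0)
                    return pp == 3 ? RO : RW;  /* only PP11 is read-only for key 0 */

            switch (pp) {
            case 0:  return NA;  /* PP00: no access for key 1 */
            case 1:  return RO;  /* PP01: read-only for key 1 */
            case 2:  return RW;  /* PP10: RW for all */
            default: return RO;  /* PP11: RO for all */
            }
    }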
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 31ed2b13
@@ -65,6 +65,8 @@ typedef pte_t *pgtable_t;
 /* Values for Segment Registers */
 #define SR_NX	0x10000000	/* No Execute */
+#define SR_KP	0x20000000	/* User key */
+#define SR_KS	0x40000000	/* Supervisor key */
 #ifndef __ASSEMBLY__
@@ -522,9 +522,9 @@ InstructionTLBMiss:
 	andc.	r1,r1,r0		/* check access & ~permission */
 	bne-	InstructionAddressInvalid /* return if access not permitted */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
-	ori	r1, r1, 0xe05		/* clear out reserved bits */
-	andc	r1, r0, r1		/* PP = user? 2 : 0 */
+	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
+	ori	r1, r1, 0xe06		/* clear out reserved bits */
+	andc	r1, r0, r1		/* PP = user? 1 : 0 */
 BEGIN_FTR_SECTION
 	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -590,11 +590,11 @@ DataLoadTLBMiss:
 	 * we would need to update the pte atomically with lwarx/stwcx.
 	 */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwinm	r1,r0,32-10,31,31	/* _PAGE_RW -> PP lsb */
+	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
 	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
 	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r1,r1,0xe04		/* clear out reserved bits */
-	andc	r1,r0,r1		/* PP = user? rw? 2: 3: 0 */
+	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
 BEGIN_FTR_SECTION
 	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
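
Expressed in C, the PP computation the miss handlers now perform is
the following (a sketch mirroring the comments in the assembly above,
not code from the patch):

    /* New PP encoding: user RW pages get PP01 instead of PP10. */
    static unsigned int pte_to_pp(int user, int rw)
    {
            if (!user)
                    return 0;   /* kernel page: PP00, RW under key 0, NA under key 1 */
            return rw ? 1 : 3;  /* user page: RW -> PP01, RO -> PP11 */
    }

create_hpte further below applies the same encoding, except that a
user page only counts as writable when it is both _PAGE_RW and
_PAGE_DIRTY.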
@@ -670,9 +670,9 @@ DataStoreTLBMiss:
 	 * we would need to update the pte atomically with lwarx/stwcx.
 	 */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
-	li	r1,0xe05		/* clear out reserved bits & PP lsb */
-	andc	r1,r0,r1		/* PP = user? 2: 0 */
+	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
+	li	r1,0xe06		/* clear out reserved bits & PP msb */
+	andc	r1,r0,r1		/* PP = user? 1: 0 */
 BEGIN_FTR_SECTION
 	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -896,9 +896,9 @@ load_up_mmu:
 	tophys(r6,r6)
 	lwz	r6,_SDR1@l(r6)
 	mtspr	SPRN_SDR1,r6
-	li	r0, NUM_USER_SEGMENTS	/* load up segment register values */
+	li	r0, NUM_USER_SEGMENTS	/* load up user segment register values */
 	mtctr	r0		/* for context 0 */
-	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
+	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
 #ifdef CONFIG_PPC_KUEP
 	oris	r3, r3, SR_NX@h	/* Set Nx */
 #endif
@@ -910,6 +910,7 @@ load_up_mmu:
 	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
 	mtctr	r0			/* for context 0 */
 	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
+	oris	r3, r3, SR_KP@h		/* Kp = 1 */
 3:	mtsrin	r3, r4
 	addi	r3, r3, 0x111	/* increment VSID */
 	addis	r4, r4, 0x1000	/* address of next segment */
@@ -1016,7 +1017,6 @@ _ENTRY(switch_mmu_context)
 	blt-	4f
 	mulli	r3,r3,897	/* multiply context by skew factor */
 	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
-	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
 #ifdef CONFIG_PPC_KUEP
 	oris	r3, r3, SR_NX@h	/* Set Nx */
 #endif
@@ -309,13 +309,13 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 _GLOBAL(create_hpte)
 	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
-	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
-	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
+	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
+	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
 	and	r8,r8,r0		/* writable if _RW & _DIRTY */
 	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
 	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r8,r8,0xe04		/* clear out reserved bits */
-	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
+	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
 BEGIN_FTR_SECTION
 	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
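
For context on where this is heading (not part of this patch): once
user RW pages are encoded as PP01, the follow-up KUAP patch only has
to set Ks to 1 on the user segments while in the kernel to turn
kernel writes to userspace into faults. Building on the pp_access()
sketch above:

    /* Kernel (supervisor) accesses take the segment's Ks as their key. */
    static int kernel_can_write(unsigned int ks, unsigned int pp)
    {
            return pp_access(ks, pp) == RW;
    }

For a user RW page (pp == 1), kernel_can_write(0, 1) is true while
kernel_can_write(1, 1) is false: reads still work, writes fault.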