Commit 812fadcb authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: extend _PAGE_PRIVILEGED to all CPUs

commit ac29c640 ("powerpc/mm: Replace _PAGE_USER with
_PAGE_PRIVILEGED") introduced _PAGE_PRIVILEGED for BOOK3S/64.

This patch generalises _PAGE_PRIVILEGED to all CPUs, allowing each
platform to define either _PAGE_PRIVILEGED or _PAGE_USER, or both.

PPC_8xx has a _PAGE_SHARED flag which is set for, and only for, all
non-user pages. Let's rename it _PAGE_PRIVILEGED to remove confusion,
as it has nothing to do with Linux shared pages.

On BookE, there is a _PAGE_BAP_SR bit which has to be set for kernel
pages; defining _PAGE_PRIVILEGED as _PAGE_BAP_SR makes this generic.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 5f356497
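
As a quick sketch of the semantics this change arrives at (a stand-alone model for illustration, not part of the commit: the pte_t model, the flag values other than _PAGE_PRIVILEGED, and the main() harness are assumptions), the generic pte_user() test modified below works out as follows under the 8xx convention, where _PAGE_PRIVILEGED is defined and _PAGE_USER falls back to 0:

#include <stdio.h>
#include <stdbool.h>

/* Stand-alone model of the generic pte_user() test from pte-common.h,
 * instantiated with the 8xx convention from this patch: the platform
 * defines _PAGE_PRIVILEGED (0x0004) and _PAGE_USER falls back to 0. */
#define _PAGE_PRESENT    0x0001
#define _PAGE_PRIVILEGED 0x0004 /* formerly _PAGE_SHARED on 8xx */
#define _PAGE_USER       0x0000 /* fallback when _PAGE_PRIVILEGED exists */

typedef unsigned long pte_t;
static unsigned long pte_val(pte_t pte) { return pte; }

static bool pte_user(pte_t pte)
{
	/* the single test the patch makes work for every convention */
	return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER;
}

int main(void)
{
	pte_t kernel_pte = _PAGE_PRESENT | _PAGE_PRIVILEGED;
	pte_t user_pte   = _PAGE_PRESENT;

	printf("kernel pte user? %d\n", pte_user(kernel_pte)); /* prints 0 */
	printf("user pte user?   %d\n", pte_user(user_pte));   /* prints 1 */
	return 0;
}

On a CPU that defines only _PAGE_USER, the same expression reduces to the old (pte_val(pte) & _PAGE_USER) == _PAGE_USER, since _PAGE_PRIVILEGED then falls back to 0.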
@@ -15,7 +15,7 @@
 #define _PAGE_BIT_SWAP_TYPE 0

 #define _PAGE_RO 0
-#define _PAGE_SHARED 0
+#define _PAGE_USER 0

 #define _PAGE_EXEC 0x00001 /* execute permission */
 #define _PAGE_WRITE 0x00002 /* write access allowed */
...
@@ -31,7 +31,7 @@
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT 0x0001 /* Page is valid */
 #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
-#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
+#define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */
 #define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
 #define _PAGE_DIRTY 0x0100 /* C: page changed */
@@ -54,13 +54,5 @@
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES 1

-/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO)
-#define _PAGE_KERNEL_ROX (_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
-#define _PAGE_KERNEL_RW (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
-			 _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
-			  _PAGE_HWWRITE | _PAGE_EXEC)
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
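
With the 8xx-specific kernel flags removed above, 8xx now picks up the generic _PAGE_KERNEL_* fallbacks from pte-common.h (changed further down), which OR in _PAGE_PRIVILEGED. A stand-alone sketch of that expansion (only _PAGE_PRIVILEGED's 0x0004 comes from this patch; the _PAGE_RO and _PAGE_EXEC values are placeholders for illustration):

#include <stdio.h>

#define _PAGE_PRIVILEGED 0x0004 /* from this patch (was _PAGE_SHARED) */
#define _PAGE_RO         0x0200 /* placeholder value */
#define _PAGE_EXEC       0x0020 /* placeholder value */

/* generic fallbacks as pte-common.h defines them after this patch */
#define _PAGE_KERNEL_RO  (_PAGE_PRIVILEGED | _PAGE_RO)
#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_RO | _PAGE_EXEC)

int main(void)
{
	/* kernel pages get _PAGE_PRIVILEGED without 8xx defining anything */
	printf("_PAGE_KERNEL_RO  = %#x\n", _PAGE_KERNEL_RO);  /* 0x204 */
	printf("_PAGE_KERNEL_ROX = %#x\n", _PAGE_KERNEL_ROX); /* 0x224 */
	return 0;
}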
@@ -55,6 +55,7 @@
 #define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
 #define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
 #define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+#define _PAGE_PRIVILEGED (_PAGE_BAP_SR)

 #define _PAGE_HASHPTE 0
 #define _PAGE_BUSY 0
...
@@ -8,9 +8,6 @@
 #ifndef _PAGE_HASHPTE
 #define _PAGE_HASHPTE 0
 #endif
-#ifndef _PAGE_SHARED
-#define _PAGE_SHARED 0
-#endif
 #ifndef _PAGE_HWWRITE
 #define _PAGE_HWWRITE 0
 #endif
@@ -45,6 +42,14 @@
 #ifndef _PAGE_PTE
 #define _PAGE_PTE 0
 #endif
+/* At least one of _PAGE_PRIVILEGED or _PAGE_USER must be defined */
+#ifndef _PAGE_PRIVILEGED
+#define _PAGE_PRIVILEGED 0
+#else
+#ifndef _PAGE_USER
+#define _PAGE_USER 0
+#endif
+#endif

 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK _PMD_PRESENT
@@ -54,16 +59,18 @@
 #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
 #endif
 #ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO (_PAGE_RO)
+#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_RO)
 #endif
 #ifndef _PAGE_KERNEL_ROX
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO)
+#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_RO | _PAGE_EXEC)
 #endif
 #ifndef _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
+			 _PAGE_HWWRITE)
 #endif
 #ifndef _PAGE_KERNEL_RWX
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
+			  _PAGE_HWWRITE | _PAGE_EXEC)
 #endif
 #ifndef _PAGE_HPTEFLAGS
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
@@ -85,7 +92,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  */
 static inline bool pte_user(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+	return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER;
 }

 #endif /* __ASSEMBLY__ */
@@ -116,6 +123,7 @@ static inline bool pte_user(pte_t pte)
 #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
			 _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | \
+			 _PAGE_PRIVILEGED | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
 /*
...
@@ -678,7 +678,7 @@ DTLBMissIMMR:
 	mtspr	SPRN_MD_TWC, r10
 	mfspr	r10, SPRN_IMMR		/* Get current IMMR */
 	rlwinm	r10, r10, 0, 0xfff80000	/* Get 512 kbytes boundary */
-	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
		  _PAGE_PRESENT | _PAGE_NO_CACHE
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
@@ -696,7 +696,7 @@ DTLBMissLinear:
 	li	r11, MD_PS8MEG | MD_SVALID
 	mtspr	SPRN_MD_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
-	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
		  _PAGE_PRESENT
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
@@ -715,7 +715,7 @@ ITLBMissLinear:
 	li	r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC
 	mtspr	SPRN_MI_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
-	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
		  _PAGE_PRESENT
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
...
@@ -67,7 +67,7 @@ void __init MMU_init_hw(void)
 	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
 #ifdef CONFIG_PIN_TLB_DATA
 	unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
-	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
+	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY;
 #ifdef CONFIG_PIN_TLB_IMMR
 	int i = 29;
 #else
...
@@ -112,13 +112,8 @@ struct flag_info {
 static const struct flag_info flag_array[] = {
 	{
-#ifdef CONFIG_PPC_BOOK3S_64
-		.mask	= _PAGE_PRIVILEGED,
-		.val	= 0,
-#else
-		.mask	= _PAGE_USER,
+		.mask	= _PAGE_USER | _PAGE_PRIVILEGED,
 		.val	= _PAGE_USER,
-#endif
 		.set	= "user",
 		.clear	= " ",
 	}, {
@@ -229,10 +224,6 @@ static const struct flag_info flag_array[] = {
 		.mask	= _PAGE_SPECIAL,
 		.val	= _PAGE_SPECIAL,
 		.set	= "special",
-	}, {
-		.mask	= _PAGE_SHARED,
-		.val	= _PAGE_SHARED,
-		.set	= "shared",
 	}
 };
...
@@ -54,7 +54,8 @@ static inline int pte_looks_normal(pte_t pte)
 	return 0;
 #else
 	return (pte_val(pte) &
-		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
+		 _PAGE_PRIVILEGED)) ==
 		(_PAGE_PRESENT | _PAGE_USER);
 #endif
 }
...
@@ -98,14 +98,7 @@ ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)

 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
-
-#ifdef _PAGE_BAP_SR
-	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
-	 * which means that we just cleared supervisor access... oops ;-) This
-	 * restores it
-	 */
-	flags |= _PAGE_BAP_SR;
-#endif
+	flags |= _PAGE_PRIVILEGED;

 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
...
@@ -244,20 +244,8 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
 	/*
	 * Force kernel mapping.
	 */
-#if defined(CONFIG_PPC_BOOK3S_64)
-	flags |= _PAGE_PRIVILEGED;
-#else
 	flags &= ~_PAGE_USER;
-#endif
-
-#ifdef _PAGE_BAP_SR
-	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
-	 * which means that we just cleared supervisor access... oops ;-) This
-	 * restores it
-	 */
-	flags |= _PAGE_BAP_SR;
-#endif
+	flags |= _PAGE_PRIVILEGED;

 	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
...
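
The two ioremap_prot() simplifications above rely on _PAGE_PRIVILEGED being _PAGE_BAP_SR on BookE. A stand-alone sketch of the flag arithmetic (bit values are assumed for illustration; the real encodings live in pte-book3e.h):

#include <stdio.h>

#define _PAGE_BAP_SR     0x004 /* supervisor read (illustrative value) */
#define _PAGE_BAP_UR     0x008 /* user read (illustrative value) */
#define _PAGE_USER       (_PAGE_BAP_UR | _PAGE_BAP_SR) /* as in the diff */
#define _PAGE_PRIVILEGED (_PAGE_BAP_SR)                /* as in the diff */

int main(void)
{
	unsigned long flags = _PAGE_USER; /* a user-readable mapping */

	/* clearing _PAGE_USER also clears _PAGE_BAP_SR, dropping
	 * supervisor read: the "oops" the removed #ifdef fixed up */
	flags &= ~_PAGE_USER;
	printf("after clear: %#lx\n", flags); /* 0, SR lost */

	/* ORing in _PAGE_PRIVILEGED restores supervisor read with no
	 * BookE-specific #ifdef, so both ioremap variants shrink */
	flags |= _PAGE_PRIVILEGED;
	printf("after fixup: %#lx\n", flags); /* 0x4, SR back */
	return 0;
}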