Commit c8c06f5a authored by Aneesh Kumar K.V, committed by Benjamin Herrenschmidt

powerpc/mm: Free up _PAGE_COHERENCE for numa fault use later

Always set memory coherence on hash64 configs. If a platform
cannot have memory coherence always set, it can infer that from
_PAGE_NO_CACHE and _PAGE_WRITETHRU, as is done in the lpar code.
So we don't really need a separate _PAGE_COHERENT bit to track
memory coherence.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 92c08a0d
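
For context, a minimal C sketch of what the commit message alludes to: a platform that cannot keep the coherence ("M") bit always set can derive it from the pte cache-control bits instead. The helper below is hypothetical and not part of this commit; only the _PAGE_* and HPTE_R_M names come from the kernel headers.

/*
 * Illustrative sketch only: derive the HPTE "M" (memory coherence)
 * bit from the Linux pte cache-control bits rather than from a
 * dedicated _PAGE_COHERENT flag. The helper itself is hypothetical.
 */
static unsigned long derive_hpte_m(unsigned long pteflags)
{
	/* Cache-inhibited, non-write-through mappings must not claim coherence */
	if ((pteflags & _PAGE_NO_CACHE) && !(pteflags & _PAGE_WRITETHRU))
		return 0;
	return HPTE_R_M;	/* normal cacheable memory: coherence enforced */
}
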
@@ -19,7 +19,7 @@
 #define _PAGE_FILE		0x0002 /* (!present only) software: pte holds file offset */
 #define _PAGE_EXEC		0x0004 /* No execute on POWER4 and newer (we invert) */
 #define _PAGE_GUARDED		0x0008
-#define _PAGE_COHERENT		0x0010 /* M: enforce memory coherence (SMP systems) */
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
 #define _PAGE_NO_CACHE		0x0020 /* I: cache inhibit */
 #define _PAGE_WRITETHRU		0x0040 /* W: cache write-through */
 #define _PAGE_DIRTY		0x0080 /* C: page changed */
@@ -148,7 +148,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
-	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
+	/*
+	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 */
+	ori	r3,r3,HPTE_R_C | HPTE_R_M
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -457,7 +460,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r3,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
-	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
+	/*
+	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 */
+	ori	r3,r3,HPTE_R_C | HPTE_R_M
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -795,7 +801,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
-	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
+	/*
+	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 */
+	ori	r3,r3,HPTE_R_C | HPTE_R_M
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -169,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
 					 (pteflags & _PAGE_DIRTY)))
 		rflags |= 1;
-
-	/* Always add C */
-	return rflags | HPTE_R_C;
+	/*
+	 * Always add "C" bit for perf. Memory coherence is always enabled
+	 */
+	return rflags | HPTE_R_C | HPTE_R_M;
 }
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
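
To make the shared pattern concrete, here is a condensed sketch of the pte-flag conversion these hunks implement. It is simplified from htab_convert_pte_flags() and the WIMG handling in the huge-page paths below, not a drop-in replacement; protection-bit handling is omitted.

/*
 * Condensed, illustrative sketch of converting Linux pte flags to
 * HPTE rflags after this change. The _PAGE_WRITETHRU/_PAGE_NO_CACHE/
 * _PAGE_GUARDED bits line up with HPTE_R_W/I/G, so they are ORed in
 * directly; "C" and "M" are now added unconditionally.
 */
static unsigned long convert_pte_flags_sketch(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* Cache attributes (W, I, G) come straight from the pte */
	rflags |= pteflags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
			      _PAGE_GUARDED);
	/* Always add "C" for perf; memory coherence is always enabled */
	return rflags | HPTE_R_C | HPTE_R_M;
}
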
@@ -127,7 +127,11 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 
 	/* Add in WIMG bits */
 	rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-			      _PAGE_COHERENT | _PAGE_GUARDED));
+			      _PAGE_GUARDED));
+	/*
+	 * enable the memory coherence always
+	 */
+	rflags |= HPTE_R_M;
 
 	/* Insert into the hash table, primary slot */
 	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -99,6 +99,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 
 	/* Add in WIMG bits */
 	rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
 			      _PAGE_COHERENT | _PAGE_GUARDED));
+	/*
+	 * enable the memory coherence always
+	 */
+	rflags |= HPTE_R_M;
 	slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
 				     mmu_psize, ssize);