Commit cd03adb0 authored by Russell King, committed by Russell King

[ARM SMP] Add support for shared memory attribute

We need to set the shared memory attribute in the page tables
on SMP systems so that cache coherency between CPUs can operate.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 0b154bb7
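As an aside before the diff: a minimal, self-contained C sketch of the idea, under illustrative assumptions. The names PTE_SHARED, SECT_SHARED, struct mem_prot and apply_smp_shared below are hypothetical stand-ins, not the kernel's L_PTE_SHARED / PMD_SECT_S definitions or its mem_types/protection_map plumbing; the bit positions are assumptions for the example only. The point it shows is simply that, on SMP, the "shared" attribute is ORed into both the page-level and section-level protection values for normal memory so the hardware keeps the CPUs' caches coherent.

	/*
	 * Minimal sketch (not kernel code) of the effect of this commit:
	 * on an SMP configuration, mark normal memory mappings as shared
	 * so that hardware cache coherency applies to them.
	 */
	#include <stdio.h>

	#define PTE_SHARED	(1u << 10)	/* assumed S bit in a page entry    */
	#define SECT_SHARED	(1u << 16)	/* assumed S bit in a section entry */

	struct mem_prot {
		unsigned int prot_user;	/* user page protection bits   */
		unsigned int prot_kern;	/* kernel page protection bits */
		unsigned int prot_sect;	/* 1MB section protection bits */
	};

	/* OR the shared attribute into the protections only when SMP. */
	static void apply_smp_shared(struct mem_prot *p, int smp)
	{
		if (!smp)
			return;

		p->prot_user |= PTE_SHARED;
		p->prot_kern |= PTE_SHARED;
		p->prot_sect |= SECT_SHARED;
	}

	int main(void)
	{
		struct mem_prot mem = { 0x00f, 0x00f, 0xc02 };

		apply_smp_shared(&mem, 1);
		printf("user %#x kern %#x sect %#x\n",
		       mem.prot_user, mem.prot_kern, mem.prot_sect);
		return 0;
	}

The assembly changes further down ("set shared pgtable") complement this by marking the page tables themselves as shared memory for the hardware table walker.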
@@ -354,7 +354,7 @@ void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot;
+	unsigned int user_pgprot, kern_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -381,7 +381,7 @@ void __init build_mem_type_table(void)
 	}
 
 	cp = &cache_policies[cachepolicy];
-	user_pgprot = cp->pte;
+	kern_pgprot = user_pgprot = cp->pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -393,6 +393,7 @@ void __init build_mem_type_table(void)
 		 */
 		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
 		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
@@ -412,32 +413,47 @@ void __init build_mem_type_table(void)
 		 * (iow, non-global)
 		 */
		user_pgprot |= L_PTE_ASID;
+
+#ifdef CONFIG_SMP
+		/*
+		 * Mark memory with the "shared" attribute for SMP systems
+		 */
+		user_pgprot |= L_PTE_SHARED;
+		kern_pgprot |= L_PTE_SHARED;
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+#endif
 	}
 
+	for (i = 0; i < 16; i++) {
+		unsigned long v = pgprot_val(protection_map[i]);
+		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
+		protection_map[i] = __pgprot(v);
+	}
+
+	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+
 	if (cpu_arch >= CPU_ARCH_ARMv5) {
-		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+#ifndef CONFIG_SMP
+		/*
+		 * Only use write-through for non-SMP systems
+		 */
+		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+#endif
 	} else {
-		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
-		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
 	}
 
+	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | L_PTE_WRITE |
+				 L_PTE_EXEC | kern_pgprot);
+
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
-	}
-
-	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE |
-				 L_PTE_EXEC | cp->pte);
-
 	switch (cp->pmd) {
 	case PMD_SECT_WT:
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;

@@ -112,6 +112,9 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+#ifdef CONFIG_SMP
+	orr	r0, r0, #2			@ set shared pgtable
+#endif
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -140,7 +143,7 @@ ENTRY(cpu_v6_switch_mm)
 ENTRY(cpu_v6_set_pte)
 	str	r1, [r0], #-2048		@ linux version
 
-	bic	r2, r1, #0x000007f0
+	bic	r2, r1, #0x000003f0
 	bic	r2, r2, #0x00000003
 	orr	r2, r2, #PTE_EXT_AP0 | 2
 
@@ -198,6 +201,9 @@ __v6_setup:
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
+#ifdef CONFIG_SMP
+	orr	r4, r4, #2			@ set shared pgtable
+#endif
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #ifdef CONFIG_VFP
 	mrc	p15, 0, r0, c1, c0, 2