Commit 3ae93f09 authored by Russell King

[ARM] Re-work L1 pagetable bit handling.

Remove run-time special casing of L1 page table permissions/cache/control
bits; instead set up the bits once at boot time.
parent 57739d81
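The change in outline: previously each CPU's set_pmd patched cache/buffer bits into L1 section descriptors at map time (see the deleted assembler in the proc-*.S hunks below); now build_mem_type_table() folds the CPU-appropriate bits into the mem_types[] table once at boot, so set_pmd stores descriptors unmodified. A minimal userspace sketch of that idea, using simplified stand-in types and a two-entry table (not the kernel code itself):

#include <stdio.h>

/* Simplified stand-ins: C (cacheable) is bit 3 and B (bufferable) is
 * bit 2, as in ARM L1 section descriptors. The table below and the
 * 'writethrough' flag are illustrative only. */
#define SECT_C	(1 << 3)
#define SECT_B	(1 << 2)

static unsigned int mem_type_bits[2];	/* e.g. "memory", "vectors" */

static void build_table(int writethrough)
{
	/* Decide the cache policy once, at boot time... */
	unsigned int policy = writethrough ? SECT_C : (SECT_C | SECT_B);

	mem_type_bits[0] |= policy;
	mem_type_bits[1] |= policy;
}

static void set_pmd(unsigned int *pmdp, unsigned int val)
{
	*pmdp = val;	/* ...so the store needs no run-time fixup */
}

int main(void)
{
	unsigned int pmd;

	build_table(1);			/* pretend write-through CPU */
	set_pmd(&pmd, 0xc0000000 | mem_type_bits[0]);
	printf("descriptor = %#x\n", pmd);
	return 0;
}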
......@@ -170,6 +170,26 @@ static const char *cache_lockdown[16] = {
"undefined 15",
};
+static const char *proc_arch[] = {
+	"undefined/unknown",
+	"3",
+	"4",
+	"4T",
+	"5",
+	"5T",
+	"5TE",
+	"?(8)",
+	"?(9)",
+	"?(10)",
+	"?(11)",
+	"?(12)",
+	"?(13)",
+	"?(14)",
+	"?(15)",
+	"?(16)",
+	"?(17)",
+};
#define CACHE_TYPE(x) (((x) >> 25) & 15)
#define CACHE_S(x) ((x) & (1 << 24))
#define CACHE_DSIZE(x) (((x) >> 12) & 4095) /* only if S=1 */
......@@ -214,6 +234,23 @@ static void __init dump_cpu_info(void)
#define dump_cpu_info() do { } while (0)
#endif
+int cpu_architecture(void)
+{
+	int cpu_arch;
+
+	if ((processor_id & 0x0000f000) == 0) {
+		cpu_arch = CPU_ARCH_UNKNOWN;
+	} else if ((processor_id & 0x0000f000) == 0x00007000) {
+		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
+	} else {
+		cpu_arch = (processor_id >> 16) & 15;
+		if (cpu_arch)
+			cpu_arch += CPU_ARCH_ARMv3;
+	}
+
+	return cpu_arch;
+}
static void __init setup_processor(void)
{
extern struct proc_info_list __proc_info_begin, __proc_info_end;
......@@ -250,9 +287,9 @@ static void __init setup_processor(void)
cpu_user = *list->user;
#endif
printk("CPU: %s %s revision %d\n",
printk("CPU: %s %s revision %d (ARMv%s)\n",
proc_info.manufacturer, proc_info.cpu_name,
(int)processor_id & 15);
(int)processor_id & 15, proc_arch[cpu_architecture()]);
dump_cpu_info();
......@@ -666,25 +703,6 @@ static const char *hwcap_str[] = {
NULL
};
-static const char *proc_arch[16] = {
-	"undefined 0",
-	"4",
-	"4T",
-	"5",
-	"5T",
-	"5TE",
-	"undefined 6",
-	"undefined 7",
-	"undefined 8",
-	"undefined 9",
-	"undefined 10",
-	"undefined 11",
-	"undefined 12",
-	"undefined 13",
-	"undefined 14",
-	"undefined 15"
-};
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
......@@ -720,30 +738,23 @@ static int c_show(struct seq_file *m, void *v)
if (elf_hwcap & (1 << i))
seq_printf(m, "%s ", hwcap_str[i]);
seq_puts(m, "\n");
seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
if ((processor_id & 0x0000f000) == 0x00000000) {
/* pre-ARM7 */
seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
} else if ((processor_id & 0x0000f000) == 0x00007000) {
} else {
if ((processor_id & 0x0000f000) == 0x00007000) {
/* ARM7 */
seq_printf(m, "CPU implementor\t: 0x%02x\n"
"CPU architecture: %s\n"
"CPU variant\t: 0x%02x\n"
"CPU part\t: 0x%03x\n",
processor_id >> 24,
processor_id & (1 << 23) ? "4T" : "3",
(processor_id >> 16) & 127,
(processor_id >> 4) & 0xfff);
seq_printf(m, "CPU variant\t: 0x%02x\n",
(processor_id >> 16) & 127);
} else {
/* post-ARM7 */
seq_printf(m, "CPU implementor\t: 0x%02x\n"
"CPU architecture: %s\n"
"CPU variant\t: 0x%x\n"
"CPU part\t: 0x%03x\n",
processor_id >> 24,
proc_arch[(processor_id >> 16) & 15],
(processor_id >> 20) & 15,
seq_printf(m, "CPU variant\t: 0x%x\n",
(processor_id >> 20) & 15);
}
seq_printf(m, "CPU part\t: 0x%03x\n",
(processor_id >> 4) & 0xfff);
}
seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
......
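A worked decode of the new cpu_architecture() helper may help. Assuming the ARM920T main ID value 0x41129200 (implementer 0x41, architecture field 2), which matches neither the pre-ARM7 nor the ARM7 ID layout, the function yields CPU_ARCH_ARMv4T. A standalone sketch, with the CPU_ARCH_* constants inlined as their numeric values:

#include <stdio.h>

int main(void)
{
	unsigned int processor_id = 0x41129200;	/* ARM920T main ID (assumed) */
	int cpu_arch;

	if ((processor_id & 0x0000f000) == 0)
		cpu_arch = 0;				/* CPU_ARCH_UNKNOWN */
	else if ((processor_id & 0x0000f000) == 0x00007000)
		cpu_arch = (processor_id & (1 << 23)) ? 3 : 1;	/* v4T : v3 */
	else {
		cpu_arch = (processor_id >> 16) & 15;	/* field = 2 here */
		if (cpu_arch)
			cpu_arch += 1;			/* + CPU_ARCH_ARMv3 */
	}

	printf("cpu_arch = %d (3 == CPU_ARCH_ARMv4T)\n", cpu_arch);
	return 0;
}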
......@@ -169,7 +169,7 @@ alloc_init_section(unsigned long virt, unsigned long phys, int prot)
pmd_t *pmdp, pmd;
pmdp = pmd_offset(pgd_offset_k(virt), virt);
-	if (virt & (1 << PMD_SHIFT))
+	if (virt & (1 << 20))
pmdp++;
pmd_val(pmd) = phys | prot;
......@@ -184,7 +184,7 @@ alloc_init_section(unsigned long virt, unsigned long phys, int prot)
* the hardware pte table.
*/
static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
+alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
pmd_t *pmdp, pmd;
pte_t *ptep;
......@@ -195,14 +195,14 @@ alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
sizeof(pte_t));
-		pmd_val(pmd) = __pa(ptep) | PMD_TYPE_TABLE | PMD_DOMAIN(domain);
+		pmd_val(pmd) = __pa(ptep) | prot_l1;
set_pmd(pmdp, pmd);
pmd_val(pmd) += 256 * sizeof(pte_t);
set_pmd(pmdp + 1, pmd);
}
ptep = pte_offset_kernel(pmdp, virt);
-	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(prot)));
+	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
/*
......@@ -217,6 +217,7 @@ static inline void clear_mapping(unsigned long virt)
struct mem_types {
	unsigned int prot_pte;
+	unsigned int prot_l1;
	unsigned int prot_sect;
	unsigned int domain;
};
......@@ -225,40 +226,82 @@ static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+				PMD_SECT_AP_WRITE,
		.domain = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_CACHEABLE |
-				PMD_SECT_BUFFERABLE,
+		.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_CACHEABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_CACHEABLE,
+		.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
				L_PTE_EXEC,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_CACHEABLE |
-				PMD_SECT_BUFFERABLE,
+		.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
				L_PTE_EXEC | L_PTE_WRITE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_CACHEABLE |
-				PMD_SECT_BUFFERABLE | PMD_SECT_AP_WRITE,
+		.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain = DOMAIN_KERNEL,
	}
};
+/*
+ * Adjust the PMD section entries according to the CPU in use.
+ */
+static void __init build_mem_type_table(void)
+{
+	int cpu_arch = cpu_architecture();
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	int writethrough = 1;
+#else
+	int writethrough = 0;
+#endif
+	int writealloc = 0, ecc = 0;
+
+	if (cpu_arch < CPU_ARCH_ARMv5) {
+		/* TEX bits, write-allocate and ECC are v5 features */
+		writealloc = 0;
+		ecc = 0;
+		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
+	}
+
+	if (writethrough) {
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
+		mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WT;
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WT;
+	} else {
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
+		mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WB;
+		if (writealloc)
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WBWA;
+		else
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WB;
+	}
+
+	if (ecc) {
+		mem_types[MT_VECTORS].prot_sect |= PMD_PROTECTION;
+		mem_types[MT_MEMORY].prot_sect |= PMD_PROTECTION;
+	}
+}
/*
* Create the page directory entries and any necessary
* page tables for the mapping specified by `md'. We
......@@ -268,7 +311,8 @@ static struct mem_types mem_types[] __initdata = {
static void __init create_mapping(struct map_desc *md)
{
unsigned long virt, length;
-	int prot_sect, prot_pte, domain;
+	int prot_sect, prot_l1, domain;
+	pgprot_t prot_pte;
long off;
if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
......@@ -279,7 +323,8 @@ static void __init create_mapping(struct map_desc *md)
}
domain = mem_types[md->type].domain;
-	prot_pte = mem_types[md->type].prot_pte;
+	prot_pte = __pgprot(mem_types[md->type].prot_pte);
+	prot_l1 = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
virt = md->virtual;
......@@ -287,7 +332,7 @@ static void __init create_mapping(struct map_desc *md)
length = md->length;
while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, domain, prot_pte);
+		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
virt += PAGE_SIZE;
length -= PAGE_SIZE;
......@@ -301,7 +346,7 @@ static void __init create_mapping(struct map_desc *md)
}
while (length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, domain, prot_pte);
+		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
virt += PAGE_SIZE;
length -= PAGE_SIZE;
......@@ -343,6 +388,8 @@ void __init memtable_init(struct meminfo *mi)
unsigned long address = 0;
int i;
+	build_mem_type_table();
+
init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
for (i = 0; i < mi->nr_banks; i++) {
......
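To see what build_mem_type_table() produces, consider MT_MEMORY on a write-back CPU with write-allocate and ECC disabled: prot_sect ends up as PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE | PMD_SECT_WB. Taking DOMAIN_KERNEL as 0 (an assumption about this kernel's domain numbering), the arithmetic works out as below. An illustrative userspace sketch, not kernel code:

#include <stdio.h>

/* Bit values follow pgtable.h in this commit. */
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_BIT4		(1 << 4)
#define PMD_DOMAIN(x)		((x) << 5)
#define PMD_SECT_BUFFERABLE	(1 << 2)
#define PMD_SECT_CACHEABLE	(1 << 3)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define DOMAIN_KERNEL		0	/* assumed value */

int main(void)
{
	unsigned int prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE |
				 PMD_SECT_WB | PMD_DOMAIN(DOMAIN_KERNEL);
	unsigned int phys = 0xc0000000;	/* a 1MB-aligned physical address */

	/* The L1 entry create_mapping() would write for this section: */
	printf("section descriptor = %#x\n", phys | prot_sect);	/* 0xc000041e */
	return 0;
}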
......@@ -375,11 +375,6 @@ ENTRY(cpu_arm1020_set_pgd)
*/
.align 5
ENTRY(cpu_arm1020_set_pmd)
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r2, r1, #0x0a		@ C & Section
-	tst	r2, #0x0b
-	biceq	r1, r1, #4		@ clear bufferable bit
-#endif
str r1, [r0]
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 4
......
......@@ -378,11 +378,6 @@ ENTRY(cpu_arm920_set_pgd)
*/
.align 5
ENTRY(cpu_arm920_set_pmd)
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r2, r1, #0x0a		@ C & Section
-	tst	r2, #0x0b
-	biceq	r1, r1, #4		@ clear bufferable bit
-#endif
str r1, [r0]
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
......
......@@ -379,11 +379,6 @@ ENTRY(cpu_arm922_set_pgd)
*/
.align 5
ENTRY(cpu_arm922_set_pmd)
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r2, r1, #0x0a		@ C & Section
-	tst	r2, #0x0b
-	biceq	r1, r1, #4		@ clear bufferable bit
-#endif
str r1, [r0]
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
......
......@@ -360,11 +360,6 @@ ENTRY(cpu_arm926_set_pgd)
*/
.align 5
ENTRY(cpu_arm926_set_pmd)
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r2, r1, #0x0a		@ C & Section
-	tst	r2, #0x0b
-	biceq	r1, r1, #4		@ clear bufferable bit
-#endif
str r1, [r0]
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
......
......@@ -583,11 +583,6 @@ ENTRY(cpu_xscale_set_pgd)
*/
.align 5
ENTRY(cpu_xscale_set_pmd)
-#if PMD_CACHE_WRITE_ALLOCATE
-	and	r2, r1, #PMD_TYPE_MASK|PMD_SECT_CACHEABLE|PMD_SECT_BUFFERABLE
-	cmp	r2, #PMD_TYPE_SECT|PMD_SECT_CACHEABLE|PMD_SECT_BUFFERABLE
-	orreq	r1, r1, #PMD_SECT_TEX(1)
-#endif
str r1, [r0]
mov ip, #0
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
......
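For reference, the assembler deleted from the four ARM9xx/ARM1020 set_pmd implementations performed the same run-time test: the eor/tst pair is zero exactly when the descriptor has type bits 0b10 (a section) and the C bit set, in which case biceq clears the B bit so a write-through D-cache never sees a write-back section (the XScale variant instead set TEX(1) to get write-allocate). A rough C rendering of the write-through fixup, for illustration only:

#include <stdio.h>

/* Roughly what the deleted assembler did (a sketch, not kernel code):
 *	eor	r2, r1, #0x0a	@ flip type bit 1 and the C bit
 *	tst	r2, #0x0b	@ zero iff bits[1:0] == 2 (section) and C == 1
 *	biceq	r1, r1, #4	@ then clear B (bit 2)
 */
static unsigned int writethrough_fixup(unsigned int pmdval)
{
	if ((pmdval & 0x0b) == 0x0a)	/* cacheable section descriptor? */
		pmdval &= ~0x04;	/* force the bufferable bit off */
	return pmdval;
}

int main(void)
{
	/* 0xc000040e is section + C + B; B is cleared, giving 0xc000040a */
	printf("%#x\n", writethrough_fixup(0xc000040e));
	return 0;
}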
......@@ -38,7 +38,7 @@
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
-#define PMD_UPDATABLE		(1 << 4)
+#define PMD_BIT4		(1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION (1 << 9) /* v5 */
/*
......@@ -49,6 +49,13 @@
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
+#define PMD_SECT_UNCACHED	(0)
+#define PMD_SECT_WT		(PMD_SECT_CACHEABLE)
+#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
+#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
/*
* - coarse table (not used)
*/
......@@ -184,6 +191,7 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
+#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pgtable_cache_init() do { } while (0)
......
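The new PMD_SECT_* policy macros are just named combinations of the C (bit 3), B (bit 2) and TEX[0] (bit 12) section bits. A small self-contained check of the values they expand to, derived from the definitions above:

#include <stdio.h>

#define PMD_SECT_BUFFERABLE	(1 << 2)
#define PMD_SECT_CACHEABLE	(1 << 3)
#define PMD_SECT_TEX(x)		((x) << 12)
#define PMD_SECT_UNCACHED	(0)
#define PMD_SECT_WT		(PMD_SECT_CACHEABLE)
#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)

int main(void)
{
	printf("UNCACHED  = 0x%04x\n", PMD_SECT_UNCACHED);	/* 0x0000 */
	printf("WT        = 0x%04x\n", PMD_SECT_WT);		/* 0x0008 */
	printf("WB        = 0x%04x\n", PMD_SECT_WB);		/* 0x000c */
	printf("MINICACHE = 0x%04x\n", PMD_SECT_MINICACHE);	/* 0x1008 */
	printf("WBWA      = 0x%04x\n", PMD_SECT_WBWA);		/* 0x100c */
	return 0;
}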
......@@ -44,6 +44,16 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
extern asmlinkage void __backtrace(void);
+#define CPU_ARCH_UNKNOWN	0
+#define CPU_ARCH_ARMv3		1
+#define CPU_ARCH_ARMv4		2
+#define CPU_ARCH_ARMv4T	3
+#define CPU_ARCH_ARMv5		4
+#define CPU_ARCH_ARMv5T	5
+#define CPU_ARCH_ARMv5TE	6
+
+extern int cpu_architecture(void);
/*
* Include processor dependent parts
*/
......
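Because the CPU_ARCH_* values are ordered, callers can gate architecture-dependent features with a simple comparison, exactly as build_mem_type_table() does. A hypothetical caller sketch (check_v5_features() is not a function from this commit):

#define CPU_ARCH_ARMv5	4	/* as defined above */
extern int cpu_architecture(void);

static void check_v5_features(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv5)
		return;		/* v3/v4 core: no TEX bits, no PMD_PROTECTION */

	/* v5 or later: PMD_SECT_TEX(1) and PMD_PROTECTION may be used */
}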