Commit aa8359a9 authored by Anton Blanchard

ppc64: cputable support from Will Schmidt

parent f9787d64
@@ -9,7 +9,7 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
 	align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
 	udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 	ptrace32.o signal32.o pmc.o rtc.o init_task.o \
-	lmb.o pci.o pci_dn.o pci_dma.o
+	lmb.o pci.o pci_dn.o pci_dma.o cputable.o
 obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
 	iSeries_IoMmTable.o iSeries_irq.o \
...
@@ -20,6 +20,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/cache.h>
+#include <asm/cputable.h>

 void disable_kernel_fp(void);	/* asm function from head.S */
@@ -238,12 +239,11 @@ fix_alignment(struct pt_regs *regs)
 	dsisr = regs->dsisr;

-	/* Power4 doesn't set DSISR for an alignment interrupt */
-	if (!cpu_alignexc_sets_dsisr()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_NODSISRALIGN) {
 		unsigned int real_instr;
 		if (__get_user(real_instr, (unsigned int *)regs->nip))
 			return 0;
-		dsisr = make_dsisr(real_instr);
+		dsisr = make_dsisr(*((unsigned *)regs->nip));
 	}

 	/* extract the operation and registers from the dsisr */
...
@@ -34,6 +34,7 @@
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
+#include <asm/cputable.h>

 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -159,5 +160,12 @@ int main(void)
 	DEFINE(CLONE_VM, CLONE_VM);
 	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);

+	/* About the CPU features table */
+	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
+	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
+	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
+	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
+	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+
 	return 0;
 }
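
For reference, the struct cpu_spec these offsets describe is introduced in cputable.c below. Pieced together from the DEFINEs above and the initializers in cpu_specs[], the layout is roughly the sketch that follows; only pvr_mask, pvr_value, cpu_features and cpu_setup are named by this patch, so the remaining field names are inferred from context rather than taken from the source.

/*
 * Sketch of the struct cpu_spec layout implied by the CPU_SPEC_* offsets
 * above and the initializers in cpu_specs[] below.  Field names other than
 * pvr_mask, pvr_value, cpu_features and cpu_setup are assumptions made for
 * illustration.
 */
struct cpu_spec {
	unsigned int	pvr_mask;	/* mask ANDed with the PVR ... */
	unsigned int	pvr_value;	/* ... and compared against this */
	char		*cpu_name;	/* e.g. "Power3 (630)" */
	unsigned long	cpu_features;	/* CPU_FTR_* bits, kernel-internal */
	unsigned int	cpu_user_features;	/* COMMON_USER_PPC64 in all entries */
	unsigned int	icache_bsize;	/* 128 in all entries */
	unsigned int	dcache_bsize;	/* 128 in all entries */
	void		(*cpu_setup)(unsigned long offset, struct cpu_spec *spec);
	unsigned long	firmware_features;	/* COMMON_PPC64_FW, then refined
						   from ibm,hypertas-functions */
};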
@@ -62,6 +62,7 @@
 #include "open_pic.h"
 #include <asm/xics.h>
 #include <asm/ppcdebug.h>
+#include <asm/cputable.h>

 extern volatile unsigned char *chrp_int_ack_special;
@@ -253,6 +254,34 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
 	ppc_md.progress = chrp_progress;

+	/* build up the firmware_features bitmask field
+	 * using contents of device-tree/ibm,hypertas-functions.
+	 * Ultimately this functionality may be moved into prom.c prom_init().
+	 */
+	struct device_node * dn;
+	char * hypertas;
+	unsigned int len;
+	dn = find_path_device("/rtas");
+	cur_cpu_spec->firmware_features = 0;
+	hypertas = get_property(dn, "ibm,hypertas-functions", &len);
+	if (hypertas) {
+		while (len > 0){
+			int i;
+			/* check value against table of strings */
+			for(i=0; i < FIRMWARE_MAX_FEATURES ;i++) {
+				if ((firmware_features_table[i].name) && (strcmp(firmware_features_table[i].name,hypertas))==0) {
+					/* we have a match */
+					cur_cpu_spec->firmware_features |= (1UL << firmware_features_table[i].val);
+					break;
+				}
+			}
+			int hypertas_len = strlen(hypertas);
+			len -= hypertas_len +1;
+			hypertas+= hypertas_len +1;
+		}
+	}
+	udbg_printf("firmware_features bitmask: 0x%x \n",
+		cur_cpu_spec->firmware_features);
 }

 void
...
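
The ibm,hypertas-functions property is a single buffer of NUL-terminated strings packed back to back, which is why the loop above advances by strlen(hypertas) + 1 on each pass. A minimal standalone sketch of the same walk (userspace illustration, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Walk a buffer of consecutive NUL-terminated strings, exactly as
 * chrp_init() walks ibm,hypertas-functions above. */
static void walk_string_list(const char *buf, unsigned int len)
{
	while (len > 0) {
		printf("feature string: %s\n", buf);
		unsigned int n = strlen(buf) + 1;	/* step over the NUL too */
		len -= n;
		buf += n;
	}
}

int main(void)
{
	const char prop[] = "hcall-pft\0hcall-tce\0hcall-sprg0";
	walk_string_list(prop, sizeof(prop));	/* prints three strings */
	return 0;
}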
/*
* arch/ppc64/kernel/cputable.c
*
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/cputable.h>

struct cpu_spec* cur_cpu_spec = NULL;

extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);

/* We only set the altivec features if the kernel was compiled with altivec
 * support
 */
#ifdef CONFIG_ALTIVEC
#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
#else
#define CPU_FTR_ALTIVEC_COMP	0
#endif

struct cpu_spec cpu_specs[] = {
	{ /* Power3 */
		0xffff0000, 0x00400000, "Power3 (630)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Power3+ */
		0xffff0000, 0x00410000, "Power3 (630+)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Northstar */
		0xffff0000, 0x00330000, "Northstar",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Pulsar */
		0xffff0000, 0x00340000, "Pulsar",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* I-star */
		0xffff0000, 0x00360000, "I-star",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* S-star */
		0xffff0000, 0x00370000, "S-star",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Power4 */
		0xffff0000, 0x00350000, "Power4",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	},
	{ /* Power4+ */
		0xffff0000, 0x00380000, "Power4+",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	},
	{ /* default match */
		0x00000000, 0x00000000, "(Power4-Compatible)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	}
};

firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
	{FW_FEATURE_PFT,	"hcall-pft"},
	{FW_FEATURE_TCE,	"hcall-tce"},
	{FW_FEATURE_SPRG0,	"hcall-sprg0"},
	{FW_FEATURE_DABR,	"hcall-dabr"},
	{FW_FEATURE_COPY,	"hcall-copy"},
	{FW_FEATURE_ASR,	"hcall-asr"},
	{FW_FEATURE_DEBUG,	"hcall-debug"},
	{FW_FEATURE_PERF,	"hcall-perf"},
	{FW_FEATURE_DUMP,	"hcall-dump"},
	{FW_FEATURE_INTERRUPT,	"hcall-interrupt"},
	{FW_FEATURE_MIGRATE,	"hcall-migrate"},
};
@@ -34,6 +34,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
 #include <asm/bug.h>
+#include <asm/cputable.h>

 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
@@ -1267,6 +1268,11 @@ _GLOBAL(__start_initialization_iSeries)
 	li r0,0
 	stdu r0,-STACK_FRAME_OVERHEAD(r1)

+	LOADADDR(r3,cpu_specs)
+	LOADADDR(r4,cur_cpu_spec)
+	li r5,0
+	bl .identify_cpu
+
 	LOADADDR(r2,__toc_start)
 	addi r2,r2,0x4000
 	addi r2,r2,0x4000
@@ -1730,6 +1736,13 @@ _STATIC(start_here_pSeries)
 	li r0,0
 	stdu r0,-STACK_FRAME_OVERHEAD(r1)

+	LOADADDR(r3,cpu_specs)
+	sub r3,r3,r26
+	LOADADDR(r4,cur_cpu_spec)
+	sub r4,r4,r26
+	mr r5,r26
+	bl .identify_cpu
+
 	/* set up the TOC (physical address) */
 	LOADADDR(r2,__toc_start)
 	addi r2,r2,0x4000
@@ -1888,6 +1901,11 @@ _STATIC(start_here_common)
 	bl .start_kernel

+_GLOBAL(__setup_cpu_power3)
+	blr
+_GLOBAL(__setup_cpu_power4)
+	blr
+
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
 	LOADADDR(r5, hmt_thread_data)
...
@@ -46,6 +46,7 @@
 #include <asm/eeh.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/cputable.h>

 /*
  * Note: pte --> Linux PTE
@@ -165,7 +166,8 @@ htab_initialize(void)
 	mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;

 	/* XXX we currently map kernel text rw, should fix this */
-	if (cpu_has_largepage() && systemcfg->physicalMemorySize > 256*MB) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+	    && systemcfg->physicalMemorySize > 256*MB) {
 		create_pte_mapping((unsigned long)KERNELBASE,
 				   KERNELBASE + 256*MB, mode_rw, 0);
 		create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
@@ -279,7 +281,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 #define PPC64_HWNOEXEC (1 << 2)

 	/* We do lazy icache flushing on cpus that support it */
-	if (unlikely(cpu_has_noexecute() && pfn_valid(pte_pfn(new_pte)))) {
+	if (unlikely((cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)
+		     && pfn_valid(pte_pfn(new_pte)))) {
 		struct page *page = pte_page(new_pte);

 		/* page is dirty */
...
@@ -33,6 +33,7 @@
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/cputable.h>
 #include <asm/time.h>

 #include "iSeries_setup.h"
@@ -254,7 +255,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig
 {
 	unsigned long i;
 	unsigned long mem_blocks = 0;

-	if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
 		mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
 	else
 		mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
...
@@ -27,6 +27,7 @@
 #include <asm/cache.h>
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
+#include <asm/cputable.h>

 	.text
@@ -444,6 +445,95 @@ _GLOBAL(cvt_df)
 	stfd 0,0(r5)
 	blr

+/*
+ * identify_cpu,
+ * In:	r3 = base of the cpu_specs array
+ *	r4 = address of cur_cpu_spec
+ *	r5 = relocation offset
+ */
+_GLOBAL(identify_cpu)
+	mfpvr r7
+1:
+	lwz r8,CPU_SPEC_PVR_MASK(r3)
+	and r8,r8,r7
+	lwz r9,CPU_SPEC_PVR_VALUE(r3)
+	cmplw 0,r9,r8
+	beq 1f
+	addi r3,r3,CPU_SPEC_ENTRY_SIZE
+	b 1b
+1:
+	add r3,r3,r5
+	std r3,0(r4)
+	blr
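
In C terms, identify_cpu amounts to roughly the following sketch (illustration only; the real routine is assembly because it runs before the normal C environment is up, and r5 carries a relocation offset for callers still executing at the physical load address):

/* Rough C equivalent of identify_cpu above.  The scan relies on the final
 * { 0x00000000, 0x00000000, ... } entry of cpu_specs[] matching any PVR,
 * so the loop always terminates. */
struct cpu_spec *identify_cpu_sketch(struct cpu_spec *specs,
				     struct cpu_spec **cur,
				     unsigned long offset,
				     unsigned int pvr)	/* value from mfpvr */
{
	while ((pvr & specs->pvr_mask) != specs->pvr_value)
		specs++;				/* next table entry */
	*cur = (struct cpu_spec *)((char *)specs + offset);	/* relocate */
	return *cur;
}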
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nop's over sections of code that don't apply for this cpu.
+ * r3 = data offset (not changed)
+ */
+_GLOBAL(do_cpu_ftr_fixups)
+	/* Get CPU 0 features */
+	LOADADDR(r6,cur_cpu_spec)
+	sub r6,r6,r3
+	ld r4,0(r6)
+	sub r4,r4,r3
+	ld r4,CPU_SPEC_FEATURES(r4)
+	/* Get the fixup table */
+	LOADADDR(r6,__start___ftr_fixup)
+	sub r6,r6,r3
+	LOADADDR(r7,__stop___ftr_fixup)
+	sub r7,r7,r3
+	/* Do the fixup */
+1:	cmpld r6,r7
+	bgelr
+	addi r6,r6,32
+	ld r8,-32(r6)	/* mask */
+	and r8,r8,r4
+	ld r9,-24(r6)	/* value */
+	cmpld r8,r9
+	beq 1b
+	ld r8,-16(r6)	/* section begin */
+	ld r9,-8(r6)	/* section end */
+	subf. r9,r8,r9
+	beq 1b
+	/* write nops over the section of code */
+	/* todo: if large section, add a branch at the start of it */
+	srwi r9,r9,2
+	mtctr r9
+	sub r8,r8,r3
+	lis r0,0x60000000@h	/* nop */
+3:	stw r0,0(r8)
+	andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+	beq 2f
+	dcbst 0,r8	/* suboptimal, but simpler */
+	sync
+	icbi 0,r8
+2:	addi r8,r8,4
+	bdnz 3b
+	sync		/* additional sync needed on g4 */
+	isync
+	b 1b
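
do_cpu_ftr_fixups consumes 32-byte records from the __ftr_fixup section (added to the linker script further down); after the addi, the four fields are read back at offsets -32, -24, -16 and -8. In C, the record and the patching logic look roughly like this sketch (struct and function names invented for illustration):

/* Layout of one __ftr_fixup record, per the four loads above. */
struct ftr_fixup_entry {
	unsigned long mask;	/* feature bits this fixup depends on */
	unsigned long value;	/* keep the code if (features & mask) == value */
	unsigned long start;	/* address of the first instruction */
	unsigned long end;	/* address one past the last instruction */
};

/* Rough C equivalent of the fixup loop.  The asm additionally applies the
 * relocation offset in r3, and flushes the d-cache / invalidates the
 * i-cache for the patched range on split-cache CPUs. */
static void do_cpu_ftr_fixups_sketch(struct ftr_fixup_entry *e,
				     struct ftr_fixup_entry *stop,
				     unsigned long features)
{
	unsigned int *p;

	for (; e < stop; e++) {
		if ((features & e->mask) == e->value)
			continue;		/* feature state matches: keep code */
		for (p = (unsigned int *)e->start; p < (unsigned int *)e->end; p++)
			*p = 0x60000000;	/* overwrite with nops */
	}
}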
+/*
+ * call_setup_cpu - call the setup_cpu function for this cpu
+ * r3 = data offset
+ *
+ * Setup function is called with:
+ * r3 = data offset
+ * r4 = ptr to CPU spec (relocated)
+ */
+_GLOBAL(call_setup_cpu)
+	LOADADDR(r4, cur_cpu_spec)
+	sub r4,r4,r3
+	lwz r4,0(r4)	# load pointer to cpu_spec
+	sub r4,r4,r3	# relocate
+	lwz r6,CPU_SPEC_SETUP(r4)	# load function pointer
+	sub r6,r6,r3
+	mtctr r6
+	bctr

 /*
  * Create a kernel thread
  * kernel_thread(fn, arg, flags)
...
@@ -21,6 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/cputable.h>

 #define HPTE_LOCK_BIT 3
@@ -217,7 +218,7 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	}

 	/* Ensure it is out of the tlb too */
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		_tlbiel(va);
 	} else {
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
@@ -283,7 +284,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 	}

 	/* Invalidate the tlb */
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		_tlbiel(va);
 	} else {
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
@@ -346,7 +347,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 		j++;
 	}

-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < j; i++) {
...
@@ -45,6 +45,7 @@
 #include <asm/machdep.h>
 #include <asm/iSeries/HvCallHpt.h>
 #include <asm/hardirq.h>
+#include <asm/cputable.h>

 struct task_struct *last_task_used_math = NULL;
@@ -412,7 +413,7 @@ void initialize_paca_hardware_interrupt_stack(void)
 	 * __get_free_pages() might give us a page > KERNBASE+256M which
 	 * is mapped with large ptes so we can't set up the guard page.
 	 */
-	if (cpu_has_largepage())
+	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
 		return;

 	for (i=0; i < NR_CPUS; i++) {
...
@@ -47,6 +47,7 @@
 #include "open_pic.h"
 #include <asm/machdep.h>
 #include <asm/xics.h>
+#include <asm/cputable.h>

 int smp_threads_ready;
 unsigned long cache_decay_ticks;
@@ -583,7 +584,7 @@ int __devinit __cpu_up(unsigned int cpu)
 	paca[cpu].prof_multiplier = 1;
 	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		void *tmp;

 		/* maximum of 48 CPUs on machines with a segment table */
...
@@ -21,6 +21,7 @@
 #include <asm/paca.h>
 #include <asm/naca.h>
 #include <asm/pmc.h>
+#include <asm/cputable.h>

 int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
 void make_slbe(unsigned long esid, unsigned long vsid, int large,
@@ -38,7 +39,7 @@ void stab_initialize(unsigned long stab)
 	esid = GET_ESID(KERNELBASE);
 	vsid = get_kernel_vsid(esid << SID_SHIFT);

-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/* Invalidate the entire SLB & all the ERATS */
 #ifdef CONFIG_PPC_ISERIES
 		asm volatile("isync; slbia; isync":::"memory");
@@ -222,7 +223,7 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large,
 static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
 				  int kernel_segment)
 {
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 #ifndef CONFIG_PPC_ISERIES
 		if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
 			make_slbe(esid, vsid, 1, kernel_segment);
@@ -275,7 +276,7 @@ int ste_allocate(unsigned long ea)
 	esid = GET_ESID(ea);
 	__ste_allocate(esid, vsid, kernel_segment);

-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync":::"memory");
 	}
@@ -327,7 +328,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 		}
 	}

-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync" : : : "memory");
 	}
@@ -336,7 +337,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)

 /* Flush all user entries from the segment table of the current processor. */
 void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/*
 		 * XXX disable 32bit slb invalidate optimisation until we fix
 		 * the issue where a 32bit app execed out of a 64bit app can
...
@@ -59,6 +59,7 @@
 #include <asm/eeh.h>
 #include <asm/processor.h>
 #include <asm/mmzone.h>
+#include <asm/cputable.h>
 #include <asm/ppcdebug.h>
@@ -699,7 +700,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	int local = 0;

 	/* handle i-cache coherency */
-	if (!cpu_has_noexecute()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
...
@@ -65,6 +65,10 @@ SECTIONS
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;

+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
   . = ALIGN(16384);	/* init_task */
   .data.init_task : { *(.data.init_task) }
...
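
Nothing in this patch emits records into __ftr_fixup yet; the section is reserved so that feature-dependent code can register itself for do_cpu_ftr_fixups. On 32-bit ppc this is done with BEGIN_FTR_SECTION/END_FTR_SECTION macros; a hypothetical ppc64 equivalent, shown only to illustrate how the 32-byte records would be emitted (not part of this patch):

/* Hypothetical emitter for one 32-byte __ftr_fixup record, modeled on the
 * 32-bit ppc macros; names and exact form are assumptions. */
#define BEGIN_FTR_SECTION		98:

#define END_FTR_SECTION(msk, val)		\
99:						\
	.section __ftr_fixup,"a";		\
	.align 3;				\
	.llong msk;	/* mask */		\
	.llong val;	/* value */		\
	.llong 98b;	/* section begin */	\
	.llong 99b;	/* section end */	\
	.previous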
@@ -730,18 +730,6 @@ static inline void prefetchw(const void *x)

 #define spin_lock_prefetch(x)	prefetchw(x)

-#define cpu_has_largepage()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-
-#define cpu_has_slb()		(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-
-#define cpu_has_tlbiel()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-
-#define cpu_has_noexecute()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-
 /* XXX we have to call HV to set when in LPAR */
 #define cpu_has_dabr()		(1)
...