Commit dfafbae1 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5
into samba.org:/scratch/anton/tmp3
parents 4949833a d0c0cf74
@@ -9,7 +9,7 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
 	align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
 	udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 	ptrace32.o signal32.o pmc.o rtc.o init_task.o \
-	lmb.o pci.o pci_dn.o pci_dma.o
+	lmb.o pci.o pci_dn.o pci_dma.o cputable.o

 obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
 			iSeries_IoMmTable.o iSeries_irq.o \
@@ -19,11 +19,11 @@ obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
 			mf.o HvLpEvent.o iSeries_proc.o

 obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
-			eeh.o rtasd.o nvram.o
+			eeh.o rtasd.o nvram.o ras.o

 # Change this to pSeries only once we've got iSeries up to date
 obj-y += open_pic.o xics.o pSeries_htab.o rtas.o \
-	   chrp_setup.o i8259.o ras.o prom.o
+	   chrp_setup.o i8259.o prom.o

 obj-$(CONFIG_PROC_FS) += proc_ppc64.o
 obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
...
@@ -20,6 +20,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/cache.h>
+#include <asm/cputable.h>

 void disable_kernel_fp(void);	/* asm function from head.S */
@@ -238,12 +239,11 @@ fix_alignment(struct pt_regs *regs)
 	dsisr = regs->dsisr;

-	/* Power4 doesn't set DSISR for an alignment interrupt */
-	if (!cpu_alignexc_sets_dsisr()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_NODSISRALIGN) {
 		unsigned int real_instr;
 		if (__get_user(real_instr, (unsigned int *)regs->nip))
 			return 0;
-		dsisr = make_dsisr(real_instr);
+		dsisr = make_dsisr(*((unsigned *)regs->nip));
 	}

 	/* extract the operation and registers from the dsisr */
...
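The hunk above swaps the old PVR-based helper (cpu_alignexc_sets_dsisr(), removed from processor.h at the end of this commit) for a test of the new per-CPU feature word. A minimal sketch of the idiom — the wrapper name here is invented, the field and flag names are from this commit:

#include <asm/cputable.h>

/* Hypothetical wrapper: true when this CPU (e.g. POWER4) leaves DSISR
 * unset on alignment interrupts, so the handler must fetch and decode
 * the faulting instruction itself. */
static inline int cpu_needs_dsisr_fixup(void)
{
	return (cur_cpu_spec->cpu_features & CPU_FTR_NODSISRALIGN) != 0;
}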
@@ -34,6 +34,7 @@
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
+#include <asm/cputable.h>

 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -159,5 +160,12 @@ int main(void)
 	DEFINE(CLONE_VM, CLONE_VM);
 	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);

+	/* About the CPU features table */
+	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
+	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
+	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
+	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
+	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+
 	return 0;
 }
...
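The CPU_SPEC_* constants defined here let head.S and misc.S index struct cpu_spec fields without hardcoding offsets: DEFINE() emits a "->SYM value" marker into the generated assembly, and the build scrapes each marker into a #define. A reduced model of the mechanism (struct and symbol names shortened for illustration; compile with gcc -S — the marker lines are not valid assembler input, so this file is only ever turned into a .s, never assembled, which is exactly how the kernel consumes it):

#include <stddef.h>

struct spec { unsigned int pvr_mask; unsigned int pvr_value; };

/* Emits "->SYM <constant>" into the .s output; a build script then
 * rewrites every such marker as "#define SYM <constant>". */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(SPEC_PVR_VALUE, offsetof(struct spec, pvr_value));
	return 0;
}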
@@ -62,13 +62,13 @@
 #include "open_pic.h"
 #include <asm/xics.h>
 #include <asm/ppcdebug.h>
+#include <asm/cputable.h>

 extern volatile unsigned char *chrp_int_ack_special;

 void chrp_progress(char *, unsigned short);
 extern void openpic_init_IRQ(void);
-extern void init_ras_IRQ(void);
 extern void find_and_init_phbs(void);
@@ -238,7 +238,6 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
 		ppc_md.init_IRQ = xics_init_IRQ;
 		ppc_md.get_irq = xics_get_irq;
 	}
-	ppc_md.init_ras_IRQ = init_ras_IRQ;

 	ppc_md.init = chrp_init2;
@@ -253,6 +252,34 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
 	ppc_md.progress = chrp_progress;

+	/* build up the firmware_features bitmask field
+	 * using contents of device-tree/ibm,hypertas-functions.
+	 * Ultimately this functionality may be moved into prom.c prom_init().
+	 */
+	struct device_node *dn;
+	char *hypertas;
+	unsigned int len;
+	dn = find_path_device("/rtas");
+	cur_cpu_spec->firmware_features = 0;
+	hypertas = get_property(dn, "ibm,hypertas-functions", &len);
+	if (hypertas) {
+		while (len > 0) {
+			int i;
+			/* check value against table of strings */
+			for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
+				if (firmware_features_table[i].name &&
+				    strcmp(firmware_features_table[i].name, hypertas) == 0) {
+					/* we have a match */
+					cur_cpu_spec->firmware_features |=
+						(1UL << firmware_features_table[i].val);
+					break;
+				}
+			}
+			int hypertas_len = strlen(hypertas);
+			len -= hypertas_len + 1;
+			hypertas += hypertas_len + 1;
+		}
+	}
+
+	udbg_printf("firmware_features bitmask: 0x%x \n",
+		    cur_cpu_spec->firmware_features);
 }

 void
...
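The loop added above relies on "ibm,hypertas-functions" being a single property whose value is a packed sequence of NUL-terminated strings. A standalone model of that walk, with made-up sample data standing in for the device-tree property:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char prop[] = "hcall-pft\0hcall-tce\0hcall-dabr";
	const char *p = prop;
	unsigned int len = sizeof(prop);

	while (len > 0) {
		printf("feature string: %s\n", p);	/* match against table here */
		len -= strlen(p) + 1;			/* skip string plus its NUL */
		p += strlen(p) + 1;
	}
	return 0;
}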
/*
 *  arch/ppc64/kernel/cputable.c
 *
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/cputable.h>

struct cpu_spec *cur_cpu_spec = NULL;

extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec *spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec *spec);

/* We only set the altivec features if the kernel was compiled with altivec
 * support
 */
#ifdef CONFIG_ALTIVEC
#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
#else
#define CPU_FTR_ALTIVEC_COMP	0
#endif

struct cpu_spec cpu_specs[] = {
	{ /* Power3 */
		0xffff0000, 0x00400000, "Power3 (630)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Power3+ */
		0xffff0000, 0x00410000, "Power3 (630+)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Northstar */
		0xffff0000, 0x00330000, "Northstar",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Pulsar */
		0xffff0000, 0x00340000, "Pulsar",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* I-star */
		0xffff0000, 0x00360000, "I-star",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* S-star */
		0xffff0000, 0x00370000, "S-star",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Power4 */
		0xffff0000, 0x00350000, "Power4",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	},
	{ /* Power4+ */
		0xffff0000, 0x00380000, "Power4+",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	},
	{ /* default match */
		0x00000000, 0x00000000, "(Power4-Compatible)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	}
};

firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
	{FW_FEATURE_PFT,	"hcall-pft"},
	{FW_FEATURE_TCE,	"hcall-tce"},
	{FW_FEATURE_SPRG0,	"hcall-sprg0"},
	{FW_FEATURE_DABR,	"hcall-dabr"},
	{FW_FEATURE_COPY,	"hcall-copy"},
	{FW_FEATURE_ASR,	"hcall-asr"},
	{FW_FEATURE_DEBUG,	"hcall-debug"},
	{FW_FEATURE_PERF,	"hcall-perf"},
	{FW_FEATURE_DUMP,	"hcall-dump"},
	{FW_FEATURE_INTERRUPT,	"hcall-interrupt"},
	{FW_FEATURE_MIGRATE,	"hcall-migrate"},
};
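The table is matched by identify_cpu (added to misc.S below) with (PVR & pvr_mask) == pvr_value, first hit wins; the final all-zero entry matches any PVR, so the scan always terminates. In C the same walk is simply (a model of the assembly routine; relocation handling omitted, pvr would come from mfpvr):

/* C model of the assembly identify_cpu: return the first cpu_specs[]
 * entry matching the processor version register. */
static struct cpu_spec *identify_cpu_model(unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;

	while ((pvr & s->pvr_mask) != s->pvr_value)
		s++;	/* fixed-size entries; default entry ends the scan */
	return s;
}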
@@ -34,6 +34,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
 #include <asm/bug.h>
+#include <asm/cputable.h>

 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
@@ -1267,6 +1268,11 @@ _GLOBAL(__start_initialization_iSeries)
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

+	LOADADDR(r3,cpu_specs)
+	LOADADDR(r4,cur_cpu_spec)
+	li	r5,0
+	bl	.identify_cpu
+
 	LOADADDR(r2,__toc_start)
 	addi	r2,r2,0x4000
 	addi	r2,r2,0x4000
@@ -1730,6 +1736,13 @@ _STATIC(start_here_pSeries)
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

+	LOADADDR(r3,cpu_specs)
+	sub	r3,r3,r26
+	LOADADDR(r4,cur_cpu_spec)
+	sub	r4,r4,r26
+	mr	r5,r26
+	bl	.identify_cpu
+
 	/* set up the TOC (physical address) */
 	LOADADDR(r2,__toc_start)
 	addi	r2,r2,0x4000
@@ -1888,6 +1901,11 @@ _STATIC(start_here_common)
 	bl	.start_kernel

+_GLOBAL(__setup_cpu_power3)
+	blr
+_GLOBAL(__setup_cpu_power4)
+	blr
+
_GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
 	LOADADDR(r5, hmt_thread_data)
...
@@ -46,6 +46,7 @@
 #include <asm/eeh.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/cputable.h>

 /*
  * Note: pte --> Linux PTE
@@ -165,7 +166,8 @@ htab_initialize(void)
 	mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;

 	/* XXX we currently map kernel text rw, should fix this */
-	if (cpu_has_largepage() && systemcfg->physicalMemorySize > 256*MB) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+	    && systemcfg->physicalMemorySize > 256*MB) {
 		create_pte_mapping((unsigned long)KERNELBASE,
 				   KERNELBASE + 256*MB, mode_rw, 0);
 		create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
@@ -279,7 +281,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 #define PPC64_HWNOEXEC (1 << 2)

 	/* We do lazy icache flushing on cpus that support it */
-	if (unlikely(cpu_has_noexecute() && pfn_valid(pte_pfn(new_pte)))) {
+	if (unlikely((cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)
+		     && pfn_valid(pte_pfn(new_pte)))) {
 		struct page *page = pte_page(new_pte);

 		/* page is dirty */
...
@@ -33,6 +33,7 @@
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/cputable.h>
 #include <asm/time.h>

 #include "iSeries_setup.h"
@@ -254,7 +255,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig
 {
 	unsigned long i;
 	unsigned long mem_blocks = 0;
-	if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
 		mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
 	else
 		mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
@@ -311,7 +312,6 @@ iSeries_init_early(void)
 	ppc_md.setup_residual = iSeries_setup_residual;
 	ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
 	ppc_md.init_IRQ = iSeries_init_IRQ;
-	ppc_md.init_ras_IRQ = NULL;
 	ppc_md.get_irq = iSeries_get_irq;
 	ppc_md.init = NULL;
...
@@ -596,7 +596,6 @@ void __init init_IRQ(void)
 	once++;

 	ppc_md.init_IRQ();
-	if(ppc_md.init_ras_IRQ) ppc_md.init_ras_IRQ();
 }

 static struct proc_dir_entry * root_irq_dir;
...
@@ -27,6 +27,7 @@
 #include <asm/cache.h>
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
+#include <asm/cputable.h>

 	.text
@@ -444,6 +445,95 @@ _GLOBAL(cvt_df)
 	stfd	0,0(r5)
 	blr

+/*
+ * identify_cpu,
+ * In:	r3 = base of the cpu_specs array
+ *	r4 = address of cur_cpu_spec
+ *	r5 = relocation offset
+ */
+_GLOBAL(identify_cpu)
+	mfpvr	r7
+1:
+	lwz	r8,CPU_SPEC_PVR_MASK(r3)
+	and	r8,r8,r7
+	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
+	cmplw	0,r9,r8
+	beq	1f
+	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
+	b	1b
+1:
+	add	r3,r3,r5
+	std	r3,0(r4)
+	blr
+
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nop's over sections of code that don't apply for this cpu.
+ * r3 = data offset (not changed)
+ */
+_GLOBAL(do_cpu_ftr_fixups)
+	/* Get CPU 0 features */
+	LOADADDR(r6,cur_cpu_spec)
+	sub	r6,r6,r3
+	ld	r4,0(r6)
+	sub	r4,r4,r3
+	ld	r4,CPU_SPEC_FEATURES(r4)
+	/* Get the fixup table */
+	LOADADDR(r6,__start___ftr_fixup)
+	sub	r6,r6,r3
+	LOADADDR(r7,__stop___ftr_fixup)
+	sub	r7,r7,r3
+	/* Do the fixup */
+1:	cmpld	r6,r7
+	bgelr
+	addi	r6,r6,32
+	ld	r8,-32(r6)	/* mask */
+	and	r8,r8,r4
+	ld	r9,-24(r6)	/* value */
+	cmpld	r8,r9
+	beq	1b
+	ld	r8,-16(r6)	/* section begin */
+	ld	r9,-8(r6)	/* section end */
+	subf.	r9,r8,r9
+	beq	1b
+	/* write nops over the section of code */
+	/* todo: if large section, add a branch at the start of it */
+	srwi	r9,r9,2
+	mtctr	r9
+	sub	r8,r8,r3
+	lis	r0,0x60000000@h	/* nop */
+3:	stw	r0,0(r8)
+	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+	beq	2f
+	dcbst	0,r8		/* suboptimal, but simpler */
+	sync
+	icbi	0,r8
+2:	addi	r8,r8,4
+	bdnz	3b
+	sync			/* additional sync needed on g4 */
+	isync
+	b	1b
+
+/*
+ * call_setup_cpu - call the setup_cpu function for this cpu
+ * r3 = data offset
+ *
+ * Setup function is called with:
+ * r3 = data offset
+ * r4 = ptr to CPU spec (relocated)
+ */
+_GLOBAL(call_setup_cpu)
+	LOADADDR(r4, cur_cpu_spec)
+	sub	r4,r4,r3
+	lwz	r4,0(r4)		# load pointer to cpu_spec
+	sub	r4,r4,r3		# relocate
+	lwz	r6,CPU_SPEC_SETUP(r4)	# load function pointer
+	sub	r6,r6,r3
+	mtctr	r6
+	bctr
+
 /*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
...
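do_cpu_ftr_fixups pairs with the END_FTR_SECTION macros in cputable.h (later in this diff): each 32-byte __ftr_fixup record holds {mask, value, begin, end}, and whenever (features & mask) != value the instructions between begin and end are overwritten with 0x60000000, the PowerPC nop. A C model of the walk, with a hypothetical flush helper standing in for the dcbst/sync/icbi sequence:

struct ftr_fixup {
	unsigned long mask;		/* feature bits to test */
	unsigned long value;		/* required result of (features & mask) */
	unsigned int *begin, *end;	/* instruction range to patch */
};

extern struct ftr_fixup __start___ftr_fixup[], __stop___ftr_fixup[];
extern void flush_insn(unsigned int *p);	/* hypothetical: dcbst; sync; icbi */

static void cpu_ftr_fixups_model(unsigned long features)
{
	struct ftr_fixup *f;
	unsigned int *p;

	for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++) {
		if ((features & f->mask) == f->value)
			continue;		/* section applies: keep it */
		for (p = f->begin; p < f->end; p++) {
			*p = 0x60000000;	/* nop out the section */
			flush_insn(p);
		}
	}
}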
@@ -376,15 +376,57 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	return 0;
 }

+LIST_HEAD(module_bug_list);
+
 int module_finalize(const Elf_Ehdr *hdr,
 		const Elf_Shdr *sechdrs, struct module *me)
 {
+	char *secstrings;
+	unsigned int i;
+
+	me->arch.bug_table = NULL;
+	me->arch.num_bugs = 0;
+
+	/* Find the __bug_table section, if present */
+	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	for (i = 1; i < hdr->e_shnum; i++) {
+		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
+			continue;
+		me->arch.bug_table = (void *) sechdrs[i].sh_addr;
+		me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
+		break;
+	}
+
+	/*
+	 * Strictly speaking this should have a spinlock to protect against
+	 * traversals, but since we only traverse on BUG()s, a spinlock
+	 * could potentially lead to deadlock and thus be counter-productive.
+	 */
+	list_add(&me->arch.bug_list, &module_bug_list);
+
 	sort_ex_table((struct exception_table_entry *)me->extable,
 		      (struct exception_table_entry *)me->extable +
 		      me->num_exentries);
+
 	return 0;
 }

 void module_arch_cleanup(struct module *mod)
 {
+	list_del(&mod->arch.bug_list);
+}
+
+struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+	struct mod_arch_specific *mod;
+	unsigned int i;
+	struct bug_entry *bug;
+
+	list_for_each_entry(mod, &module_bug_list, bug_list) {
+		bug = mod->bug_table;
+		for (i = 0; i < mod->num_bugs; ++i, ++bug)
+			if (bugaddr == bug->bug_addr)
+				return bug;
+	}
+	return NULL;
 }
...
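module_finalize uses the standard ELF idiom for locating a named section at load time; factored out, it looks like this (a sketch of the idiom, not a kernel API):

#include <elf.h>
#include <string.h>

/* Return the header of the section called `name`, or NULL. Section 0
 * is the reserved null section, hence the loop starts at 1. */
static const Elf64_Shdr *find_section(const Elf64_Ehdr *hdr,
				      const Elf64_Shdr *sechdrs,
				      const char *name)
{
	const char *secstrings =
		(const char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	unsigned int i;

	for (i = 1; i < hdr->e_shnum; i++)
		if (strcmp(secstrings + sechdrs[i].sh_name, name) == 0)
			return &sechdrs[i];
	return NULL;
}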
@@ -21,6 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/cputable.h>

 #define HPTE_LOCK_BIT 3
@@ -217,7 +218,7 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	}

 	/* Ensure it is out of the tlb too */
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		_tlbiel(va);
 	} else {
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
@@ -283,7 +284,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 	}

 	/* Invalidate the tlb */
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		_tlbiel(va);
 	} else {
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
@@ -346,7 +347,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 		j++;
 	}

-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < j; i++) {
...
@@ -45,6 +45,7 @@
 #include <asm/machdep.h>
 #include <asm/iSeries/HvCallHpt.h>
 #include <asm/hardirq.h>
+#include <asm/cputable.h>

 struct task_struct *last_task_used_math = NULL;
@@ -412,7 +413,7 @@ void initialize_paca_hardware_interrupt_stack(void)
 	 * __get_free_pages() might give us a page > KERNBASE+256M which
 	 * is mapped with large ptes so we can't set up the guard page.
 	 */
-	if (cpu_has_largepage())
+	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
 		return;

 	for (i=0; i < NR_CPUS; i++) {
...
@@ -58,7 +58,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
 				      struct pt_regs * regs);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
 				       struct pt_regs * regs);
-void init_ras_IRQ(void);

 /* #define DEBUG */
@@ -66,7 +65,8 @@ void init_ras_IRQ(void);
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
  */
-void init_ras_IRQ(void) {
+static int __init init_ras_IRQ(void)
+{
 	struct device_node *np;
 	unsigned int *ireg, len, i;
@@ -91,7 +91,10 @@ void init_ras_IRQ(void) {
 			ireg++;
 		}
 	}
+
+	return 1;
 }
+__initcall(init_ras_IRQ);

 /*
  * Handle power subsystem events (EPOW).
...
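Making init_ras_IRQ an __initcall lets the generic boot code invoke it from do_initcalls(), which is why the ppc_md.init_ras_IRQ hook can be deleted everywhere else in this diff. The pattern, reduced to its shape:

#include <linux/init.h>

static int __init example_init(void)
{
	/* runs once during boot; the __init text is freed afterwards */
	return 0;	/* initcalls conventionally return 0 on success */
}
__initcall(example_init);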
@@ -47,6 +47,7 @@
 #include "open_pic.h"
 #include <asm/machdep.h>
 #include <asm/xics.h>
+#include <asm/cputable.h>

 int smp_threads_ready;
 unsigned long cache_decay_ticks;
@@ -583,7 +584,7 @@ int __devinit __cpu_up(unsigned int cpu)
 	paca[cpu].prof_multiplier = 1;
 	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		void *tmp;

 		/* maximum of 48 CPUs on machines with a segment table */
...
@@ -21,6 +21,7 @@
 #include <asm/paca.h>
 #include <asm/naca.h>
 #include <asm/pmc.h>
+#include <asm/cputable.h>

 int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
 void make_slbe(unsigned long esid, unsigned long vsid, int large,
@@ -38,7 +39,7 @@ void stab_initialize(unsigned long stab)
 	esid = GET_ESID(KERNELBASE);
 	vsid = get_kernel_vsid(esid << SID_SHIFT);

-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/* Invalidate the entire SLB & all the ERATS */
 #ifdef CONFIG_PPC_ISERIES
 		asm volatile("isync; slbia; isync":::"memory");
@@ -222,7 +223,7 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large,
 static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
 				  int kernel_segment)
 {
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 #ifndef CONFIG_PPC_ISERIES
 		if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
 			make_slbe(esid, vsid, 1, kernel_segment);
@@ -275,7 +276,7 @@ int ste_allocate(unsigned long ea)
 	esid = GET_ESID(ea);
 	__ste_allocate(esid, vsid, kernel_segment);

-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync":::"memory");
 	}
@@ -327,7 +328,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 		}
 	}

-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync" : : : "memory");
 	}
@@ -336,7 +337,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 /* Flush all user entries from the segment table of the current processor. */
 void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/*
 		 * XXX disable 32bit slb invalidate optimisation until we fix
 		 * the issue where a 32bit app execed out of a 64bit app can
...
@@ -28,6 +28,7 @@
 #include <linux/interrupt.h>
 #include <linux/config.h>
 #include <linux/init.h>
+#include <linux/module.h>

 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -306,6 +307,56 @@ static void parse_fpe(struct pt_regs *regs)
 	_exception(SIGFPE, &info, regs);
 }

+/*
+ * Look through the list of trap instructions that are used for BUG(),
+ * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
+ * that the exception was caused by a trap instruction of some kind.
+ * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
+ * otherwise.
+ */
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+
+#ifndef CONFIG_MODULES
+#define module_find_bug(x)	NULL
+#endif
+
+static struct bug_entry *find_bug(unsigned long bugaddr)
+{
+	struct bug_entry *bug;
+
+	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
+		if (bugaddr == bug->bug_addr)
+			return bug;
+	return module_find_bug(bugaddr);
+}
+
+int
+check_bug_trap(struct pt_regs *regs)
+{
+	struct bug_entry *bug;
+	unsigned long addr;
+
+	if (regs->msr & MSR_PR)
+		return 0;	/* not in kernel */
+	addr = regs->nip;	/* address of trap instruction */
+	if (addr < PAGE_OFFSET)
+		return 0;
+	bug = find_bug(regs->nip);
+	if (bug == NULL)
+		return 0;
+	if (bug->line & BUG_WARNING_TRAP) {
+		/* this is a WARN_ON rather than BUG/BUG_ON */
+		printk(KERN_ERR "Badness in %s at %s:%d\n",
+		       bug->function, bug->file,
+		       bug->line & ~BUG_WARNING_TRAP);
+		dump_stack();
+		return 1;
+	}
+	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+	       bug->function, bug->file, bug->line);
+	return 0;
+}
+
 void
 ProgramCheckException(struct pt_regs *regs)
 {
@@ -330,6 +381,10 @@ ProgramCheckException(struct pt_regs *regs)
 		if (debugger_bpt && debugger_bpt(regs))
 			return;
 #endif
+		if (check_bug_trap(regs)) {
+			regs->nip += 4;
+			return;
+		}
 		info.si_signo = SIGTRAP;
 		info.si_errno = 0;
 		info.si_code = TRAP_BRKPT;
...
@@ -2,7 +2,5 @@
 # Makefile for ppc64-specific library files..
 #

-L_TARGET = lib.a
-
-obj-y := checksum.o dec_and_lock.o string.o strcase.o
-obj-y += copypage.o memcpy.o copyuser.o
+lib-y := checksum.o dec_and_lock.o string.o strcase.o
+lib-y += copypage.o memcpy.o copyuser.o
...
@@ -59,6 +59,7 @@
 #include <asm/eeh.h>
 #include <asm/processor.h>
 #include <asm/mmzone.h>
+#include <asm/cputable.h>
 #include <asm/ppcdebug.h>
@@ -512,7 +513,7 @@ void __init paging_init(void)

 static struct kcore_list kcore_vmem;

-static void setup_kcore(void)
+static int __init setup_kcore(void)
 {
 	int i;
@@ -536,7 +537,10 @@ static void setup_kcore(void)
 	}

 	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
+
+	return 0;
 }
+module_init(setup_kcore);

 void initialize_paca_hardware_interrupt_stack(void);
@@ -606,8 +610,6 @@ void __init mem_init(void)
 #endif
 	mem_init_done = 1;

-	setup_kcore();
-
 	/* set the last page of each hardware interrupt stack to be protected */
 	initialize_paca_hardware_interrupt_stack();
@@ -698,7 +700,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	int local = 0;

 	/* handle i-cache coherency */
-	if (!cpu_has_noexecute()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
...
@@ -154,8 +154,28 @@ static int __init parse_numa_properties(void)
 		if (max_domain < numa_domain)
 			max_domain = numa_domain;

-		node_data[numa_domain].node_start_pfn = start / PAGE_SIZE;
-		node_data[numa_domain].node_size = size / PAGE_SIZE;
+		/*
+		 * For backwards compatibility, OF splits the first node
+		 * into two regions (the first being 0-4GB). Check for
+		 * this simple case and complain if there is a gap in
+		 * memory
+		 */
+		if (node_data[numa_domain].node_size) {
+			unsigned long shouldstart =
+				node_data[numa_domain].node_start_pfn +
+				node_data[numa_domain].node_size;
+			if (shouldstart != (start / PAGE_SIZE)) {
+				printk(KERN_ERR "Hole in node, disabling "
+				       "region start %lx length %lx\n",
+				       start, size);
+				continue;
+			}
+			node_data[numa_domain].node_size += size / PAGE_SIZE;
+		} else {
+			node_data[numa_domain].node_start_pfn =
+				start / PAGE_SIZE;
+			node_data[numa_domain].node_size = size / PAGE_SIZE;
+		}

 		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
 			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
@@ -174,6 +194,20 @@ static int __init parse_numa_properties(void)
 	return 0;
 }

+void setup_nonnuma(void)
+{
+	unsigned long i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		map_cpu_to_node(i, 0);
+
+	node_data[0].node_start_pfn = 0;
+	node_data[0].node_size = lmb_end_of_DRAM() / PAGE_SIZE;
+
+	for (i = 0 ; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT)
+		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
+}
+
 void __init do_init_bootmem(void)
 {
 	int nid;
@@ -181,9 +215,8 @@ void __init do_init_bootmem(void)
 	min_low_pfn = 0;
 	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;

-	/* XXX FIXME: support machines without associativity information */
 	if (parse_numa_properties())
-		BUG();
+		setup_nonnuma();

 	for (nid = 0; nid < numnodes; nid++) {
 		unsigned long start_paddr, end_paddr;
@@ -204,7 +237,7 @@ void __init do_init_bootmem(void)
 		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];

-		bootmap_pages = bootmem_bootmap_pages(end_paddr - start_paddr);
+		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);
 		dbg("bootmap_pages = %lx\n", bootmap_pages);

 		bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
...
@@ -65,6 +65,14 @@ SECTIONS
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;

+  __start___bug_table = .;
+  __bug_table : { *(__bug_table) }
+  __stop___bug_table = .;
+
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
   . = ALIGN(16384);	/* init_task */
   .data.init_task : { *(.data.init_task) }
...
 #ifndef _PPC64_BUG_H
 #define _PPC64_BUG_H

-#include <linux/config.h>
-
 /*
  * Define an illegal instr to trap on the bug.
  * We don't use 0 because that marks the end of a function
@@ -13,29 +11,48 @@
 #ifndef __ASSEMBLY__

-#ifdef CONFIG_XMON
-struct pt_regs;
-extern void xmon(struct pt_regs *excp);
-#define BUG() do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-	xmon(0); \
-} while (0)
-#else
-#define BUG() do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-	__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
+struct bug_entry {
+	unsigned long bug_addr;
+	long line;
+	const char *file;
+	const char *function;
+};
+
+/*
+ * If this bit is set in the line number it means that the trap
+ * is for WARN_ON rather than BUG or BUG_ON.
+ */
+#define BUG_WARNING_TRAP	0x1000000
+
+#define BUG() do { \
+	__asm__ __volatile__( \
+		"1:	twi 31,0,0\n" \
+		".section __bug_table,\"a\"\n\t" \
+		"	.llong 1b,%0,%1,%2\n" \
+		".previous" \
+		: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)
-#endif

-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(x) do { \
+	__asm__ __volatile__( \
+		"1:	tdnei %0,0\n" \
+		".section __bug_table,\"a\"\n\t" \
+		"	.llong 1b,%1,%2,%3\n" \
+		".previous" \
+		: : "r" (x), "i" (__LINE__), "i" (__FILE__), \
+		    "i" (__FUNCTION__)); \
+} while (0)

 #define PAGE_BUG(page) do { BUG(); } while (0)

-#define WARN_ON(condition) do { \
-	if (unlikely((condition)!=0)) { \
-		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
-		dump_stack(); \
-	} \
+#define WARN_ON(x) do { \
+	__asm__ __volatile__( \
+		"1:	tdnei %0,0\n" \
+		".section __bug_table,\"a\"\n\t" \
+		"	.llong 1b,%1,%2,%3\n" \
+		".previous" \
+		: : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
+		    "i" (__FILE__), "i" (__FUNCTION__)); \
 } while (0)

 #endif
...
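Each macro now plants a real trap (twi 31,0,0 traps unconditionally; tdnei rX,0 traps when the register is non-zero) and drops four .llong values into __bug_table; those quadwords line up field-for-field with struct bug_entry, so the trap handler can scan the section as an array. A host-side layout check, assuming an LP64 target as on ppc64:

#include <assert.h>
#include <stddef.h>

struct bug_entry {
	unsigned long bug_addr;		/* 1b: address of the trap insn */
	long line;			/* may carry BUG_WARNING_TRAP */
	const char *file;
	const char *function;
};

int main(void)
{
	/* matches the ".llong 1b,%1,%2,%3" emission order */
	assert(offsetof(struct bug_entry, bug_addr) == 0);
	assert(offsetof(struct bug_entry, line) == 8);
	assert(offsetof(struct bug_entry, file) == 16);
	assert(offsetof(struct bug_entry, function) == 24);
	return 0;
}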
/*
 *  include/asm-ppc64/cputable.h
 *
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#ifndef __ASM_PPC_CPUTABLE_H
#define __ASM_PPC_CPUTABLE_H

/* Exposed to userland CPU features - Must match ppc32 definitions */
#define PPC_FEATURE_32			0x80000000
#define PPC_FEATURE_64			0x40000000
#define PPC_FEATURE_601_INSTR		0x20000000
#define PPC_FEATURE_HAS_ALTIVEC		0x10000000
#define PPC_FEATURE_HAS_FPU		0x08000000
#define PPC_FEATURE_HAS_MMU		0x04000000
#define PPC_FEATURE_HAS_4xxMAC		0x02000000
#define PPC_FEATURE_UNIFIED_CACHE	0x01000000

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

/* This structure can grow, its real size is used by head.S code
 * via the mkdefs mechanism.
 */
struct cpu_spec;

typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec *spec);

struct cpu_spec {
	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
	unsigned int	pvr_mask;
	unsigned int	pvr_value;

	char		*cpu_name;
	unsigned long	cpu_features;		/* Kernel features */
	unsigned int	cpu_user_features;	/* Userland features */

	/* cache line sizes */
	unsigned int	icache_bsize;
	unsigned int	dcache_bsize;

	/* this is called to initialize various CPU bits like L1 cache,
	 * BHT, SPD, etc... from head.S before branching to identify_machine
	 */
	cpu_setup_t	cpu_setup;

	/* This is used to identify firmware features which are available
	 * to the kernel.
	 */
	unsigned long	firmware_features;
};

extern struct cpu_spec cpu_specs[];
extern struct cpu_spec *cur_cpu_spec;

/* firmware feature bitmask values */
#define FIRMWARE_MAX_FEATURES 63

#define FW_FEATURE_PFT		(1UL<<0)
#define FW_FEATURE_TCE		(1UL<<1)
#define FW_FEATURE_SPRG0	(1UL<<2)
#define FW_FEATURE_DABR		(1UL<<3)
#define FW_FEATURE_COPY		(1UL<<4)
#define FW_FEATURE_ASR		(1UL<<5)
#define FW_FEATURE_DEBUG	(1UL<<6)
#define FW_FEATURE_PERF		(1UL<<7)
#define FW_FEATURE_DUMP		(1UL<<8)
#define FW_FEATURE_INTERRUPT	(1UL<<9)
#define FW_FEATURE_MIGRATE	(1UL<<10)

typedef struct {
	unsigned long	val;
	char		*name;
} firmware_feature_t;

extern firmware_feature_t firmware_features_table[];

#endif /* __ASSEMBLY__ */

/* CPU kernel features */

/* Retain the 32b definitions for the time being - use bottom half of word */
#define CPU_FTR_SPLIT_ID_CACHE		0x0000000000000001
#define CPU_FTR_L2CR			0x0000000000000002
#define CPU_FTR_SPEC7450		0x0000000000000004
#define CPU_FTR_ALTIVEC			0x0000000000000008
#define CPU_FTR_TAU			0x0000000000000010
#define CPU_FTR_CAN_DOZE		0x0000000000000020
#define CPU_FTR_USE_TB			0x0000000000000040
#define CPU_FTR_604_PERF_MON		0x0000000000000080
#define CPU_FTR_601			0x0000000000000100
#define CPU_FTR_HPTE_TABLE		0x0000000000000200
#define CPU_FTR_CAN_NAP			0x0000000000000400
#define CPU_FTR_L3CR			0x0000000000000800
#define CPU_FTR_L3_DISABLE_NAP		0x0000000000001000
#define CPU_FTR_NAP_DISABLE_L2_PR	0x0000000000002000
#define CPU_FTR_DUAL_PLL_750FX		0x0000000000004000

/* Add the 64b processor unique features in the top half of the word */
#define CPU_FTR_SLB			0x0000000100000000
#define CPU_FTR_16M_PAGE		0x0000000200000000
#define CPU_FTR_TLBIEL			0x0000000400000000
#define CPU_FTR_NOEXECUTE		0x0000000800000000
#define CPU_FTR_NODSISRALIGN		0x0000001000000000

/* Platform firmware features */
#define FW_FTR_				0x0000000000000001

#ifndef __ASSEMBLY__
#define COMMON_USER_PPC64	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)

#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_SLB | CPU_FTR_16M_PAGE | \
				 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
				 CPU_FTR_NODSISRALIGN)

#define COMMON_PPC64_FW		(0)
#endif

#ifdef __ASSEMBLY__

#define BEGIN_FTR_SECTION	98:

#define END_FTR_SECTION(msk, val)		\
99:						\
	.section __ftr_fixup,"a";		\
	.align 3;				\
	.llong msk;				\
	.llong val;				\
	.llong 98b;				\
	.llong 99b;				\
	.previous

#define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
#define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* __ASM_PPC_CPUTABLE_H */
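This commit exercises the feature bits in two ways: C code tests cur_cpu_spec->cpu_features at runtime (stab.c, htab.c, smp.c above), while assembly brackets optional instructions in BEGIN_FTR_SECTION/END_FTR_SECTION so do_cpu_ftr_fixups can nop them out once at boot. The C-side shape, as an invented example using names from this diff:

#include <asm/cputable.h>

static void flush_segments_example(void)	/* hypothetical function */
{
	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
		/* POWER4-class: invalidate the SLB */
		asm volatile("isync; slbia; isync" ::: "memory");
	else
		/* segment-table CPUs: just order the STE updates */
		asm volatile("sync" ::: "memory");
}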
@@ -67,7 +67,6 @@ struct machdep_calls {
 	void		(*get_cpuinfo)(struct seq_file *m);

 	void		(*init_IRQ)(void);
-	void		(*init_ras_IRQ)(void);
 	int		(*get_irq)(struct pt_regs *);

 	/* Optional, may be NULL. */
...
 #ifndef _ASM_PPC64_MODULE_H
 #define _ASM_PPC64_MODULE_H

+#include <linux/list.h>
+#include <asm/bug.h>
+
 struct mod_arch_specific
 {
 	/* Index of stubs section within module. */
@@ -8,8 +11,15 @@ struct mod_arch_specific
 	/* What section is the TOC? */
 	unsigned int toc_section;
+
+	/* List of BUG addresses, source line numbers and filenames */
+	struct list_head bug_list;
+	struct bug_entry *bug_table;
+	unsigned int num_bugs;
 };

+extern struct bug_entry *module_find_bug(unsigned long bugaddr);
+
 #define Elf_Shdr Elf64_Shdr
 #define Elf_Sym Elf64_Sym
 #define Elf_Ehdr Elf64_Ehdr
...
@@ -730,18 +730,6 @@ static inline void prefetchw(const void *x)

 #define spin_lock_prefetch(x)	prefetchw(x)

-#define cpu_has_largepage()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-#define cpu_has_slb()		(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-#define cpu_has_tlbiel()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-#define cpu_has_noexecute()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-
 /* XXX we have to call HV to set when in LPAR */
 #define cpu_has_dabr()		(1)
...