Commit 7231622b authored by Linus Torvalds

Merge http://ppc.bkbits.net/for-linus-ppc64

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 075e8c04 a2a92516
......@@ -1545,10 +1545,11 @@ local inflate_huft *fixed_tl;
local inflate_huft *fixed_td;
local voidpf falloc(q, n, s)
voidpf q; /* opaque pointer (not used) */
uInt n; /* number of items */
uInt s; /* size of item */
local voidpf falloc(
voidpf q, /* opaque pointer (not used) */
uInt n, /* number of items */
uInt s /* size of item */
)
{
Assert(s == sizeof(inflate_huft) && n <= fixed_left,
"inflate_trees falloc overflow");
......@@ -1558,10 +1559,11 @@ uInt s; /* size of item */
}
local void ffree(q, p, n)
voidpf q;
voidpf p;
uInt n;
local void ffree(
voidpf q,
voidpf p,
uInt n
)
{
Assert(0, "inflate_trees ffree called!");
if (q) q = p; /* to make some compilers happy */
......@@ -2164,10 +2166,11 @@ char *z_errmsg[] = {
#define DO16(buf) DO8(buf); DO8(buf);
/* ========================================================================= */
uLong adler32(adler, buf, len)
uLong adler;
Bytef *buf;
uInt len;
uLong adler32(
uLong adler,
Bytef *buf,
uInt len
)
{
unsigned long s1 = adler & 0xffff;
unsigned long s2 = (adler >> 16) & 0xffff;
......
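The three zlib hunks above are mechanical conversions from K&R-style parameter lists to ANSI C prototypes; behaviour is unchanged. For reference, a minimal self-contained sketch of the Adler-32 update the converted adler32() computes (the kernel version unrolls with DO16 and batches the modulo; here it is applied per byte for clarity):

#include <stddef.h>

#define ADLER_BASE 65521UL                      /* largest prime below 2^16 */

unsigned long adler32_sketch(unsigned long adler,
                             const unsigned char *buf, size_t len)
{
        unsigned long s1 = adler & 0xffff;          /* running byte sum */
        unsigned long s2 = (adler >> 16) & 0xffff;  /* running sum of s1 */

        while (len--) {
                s1 = (s1 + *buf++) % ADLER_BASE;
                s2 = (s2 + s1) % ADLER_BASE;
        }
        return (s2 << 16) | s1;
}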
......@@ -9,7 +9,7 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
ptrace32.o signal32.o pmc.o rtc.o init_task.o \
lmb.o pci.o pci_dn.o pci_dma.o
lmb.o pci.o pci_dn.o pci_dma.o cputable.o
obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
iSeries_IoMmTable.o iSeries_irq.o \
......@@ -19,11 +19,11 @@ obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
mf.o HvLpEvent.o iSeries_proc.o
obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
eeh.o rtasd.o nvram.o
eeh.o rtasd.o nvram.o ras.o
# Change this to pSeries only once we've got iSeries up to date
obj-y += open_pic.o xics.o pSeries_htab.o rtas.o \
chrp_setup.o i8259.o ras.o prom.o
chrp_setup.o i8259.o prom.o
obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
......@@ -31,3 +31,5 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_PPC_RTAS) += rtas-proc.o
obj-$(CONFIG_SCANLOG) += scanlog.o
CFLAGS_ioctl32.o += -Ifs/
......@@ -20,6 +20,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <asm/cputable.h>
void disable_kernel_fp(void); /* asm function from head.S */
......@@ -238,12 +239,11 @@ fix_alignment(struct pt_regs *regs)
dsisr = regs->dsisr;
/* Power4 doesn't set DSISR for an alignment interrupt */
if (!cpu_alignexc_sets_dsisr()) {
unsigned int real_instr;
if (__get_user(real_instr, (unsigned int *)regs->nip))
return 0;
dsisr = make_dsisr(real_instr);
if (cur_cpu_spec->cpu_features & CPU_FTR_NODSISRALIGN) {
unsigned int real_instr;
if (__get_user(real_instr, (unsigned int *)regs->nip))
return 0;
dsisr = make_dsisr(real_instr);
}
/* extract the operation and registers from the dsisr */
......
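This align.c hunk shows the pattern the whole series follows: an ad-hoc PVR helper (cpu_alignexc_sets_dsisr) becomes one AND against the 64-bit feature mask chosen at boot. A hedged userspace sketch of the test shape, with the feature bit taken from the new asm-ppc64/cputable.h and a hypothetical stand-in for cur_cpu_spec:

#include <stdio.h>

#define CPU_FTR_NODSISRALIGN 0x0000001000000000UL

struct cpu_spec_sketch { unsigned long cpu_features; };

/* Stand-in for cur_cpu_spec, which identify_cpu fills in at boot
 * (see the misc.S hunk further down). */
static struct cpu_spec_sketch power4 = { CPU_FTR_NODSISRALIGN };
static struct cpu_spec_sketch *cur = &power4;

int main(void)
{
        if (cur->cpu_features & CPU_FTR_NODSISRALIGN)
                printf("hardware left DSISR zero; rebuild it from the instruction\n");
        return 0;
}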
......@@ -34,6 +34,7 @@
#include <asm/iSeries/HvLpEvent.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/cputable.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
......@@ -159,5 +160,12 @@ int main(void)
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
/* About the CPU features table */
DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
return 0;
}
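The DEFINE() entries above feed the asm-offsets mechanism: compiling this file to assembly leaves "->SYM value" marker lines in the output, which the build scripts turn into #defines so head.S and misc.S can index struct cpu_spec without C. A hedged standalone illustration (build with gcc -S and inspect the .s file; struct trimmed for brevity):

#include <stddef.h>

struct cpu_spec_sketch {
        unsigned int pvr_mask;
        unsigned int pvr_value;
        unsigned long cpu_features;
};

/* Plants "->SYM value" as text in the generated assembly; the marker
 * is scraped by the build, never actually assembled. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
        DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec_sketch));
        DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec_sketch, cpu_features));
        return 0;
}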
......@@ -62,13 +62,13 @@
#include "open_pic.h"
#include <asm/xics.h>
#include <asm/ppcdebug.h>
#include <asm/cputable.h>
extern volatile unsigned char *chrp_int_ack_special;
void chrp_progress(char *, unsigned short);
extern void openpic_init_IRQ(void);
extern void init_ras_IRQ(void);
extern void find_and_init_phbs(void);
......@@ -238,7 +238,6 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.init_IRQ = xics_init_IRQ;
ppc_md.get_irq = xics_get_irq;
}
ppc_md.init_ras_IRQ = init_ras_IRQ;
ppc_md.init = chrp_init2;
......@@ -253,6 +252,34 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.progress = chrp_progress;
/* build up the firmware_features bitmask field
* using contents of device-tree/ibm,hypertas-functions.
* Ultimately this functionality may be moved into prom.c prom_init().
*/
struct device_node * dn;
char * hypertas;
unsigned int len;
dn = find_path_device("/rtas");
cur_cpu_spec->firmware_features = 0;
hypertas = get_property(dn, "ibm,hypertas-functions", &len);
if (hypertas) {
while (len > 0){
int i;
/* check value against table of strings */
for(i=0; i < FIRMWARE_MAX_FEATURES ;i++) {
if ((firmware_features_table[i].name) && (strcmp(firmware_features_table[i].name,hypertas))==0) {
/* we have a match */
cur_cpu_spec->firmware_features |= (1UL << firmware_features_table[i].val);
break;
}
}
int hypertas_len = strlen(hypertas);
len -= hypertas_len +1;
hypertas+= hypertas_len +1;
}
}
udbg_printf("firmware_features bitmask: 0x%lx\n",
cur_cpu_spec->firmware_features);
}
void
......
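The firmware_features code added to chrp_init above walks "ibm,hypertas-functions", a device-tree property that packs several NUL-terminated names into one buffer of length len. A hedged userspace sketch of the same walk against a two-entry stand-in table:

#include <stdio.h>
#include <string.h>

struct fw_feature { unsigned long val; const char *name; };

static const struct fw_feature table[] = {
        { 0, "hcall-pft" },
        { 1, "hcall-tce" },
};

int main(void)
{
        /* Names back to back, each NUL-terminated; len spans the blob. */
        const char prop[] = "hcall-tce\0hcall-pft";
        int len = sizeof(prop);
        const char *p = prop;
        unsigned long features = 0;

        while (len > 0) {
                size_t n = strlen(p);
                for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                        if (strcmp(table[i].name, p) == 0)
                                features |= 1UL << table[i].val;
                len -= n + 1;
                p += n + 1;
        }
        printf("features = %#lx\n", features);   /* 0x3 here */
        return 0;
}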
/*
* arch/ppc64/kernel/cputable.c
*
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/cputable.h>
struct cpu_spec* cur_cpu_spec = NULL;
extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
/* We only set the altivec features if the kernel was compiled with altivec
* support
*/
#ifdef CONFIG_ALTIVEC
#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
#else
#define CPU_FTR_ALTIVEC_COMP 0
#endif
struct cpu_spec cpu_specs[] = {
{ /* Power3 */
0xffff0000, 0x00400000, "Power3 (630)",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power3,
COMMON_PPC64_FW
},
{ /* Power3+ */
0xffff0000, 0x00410000, "Power3 (630+)",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power3,
COMMON_PPC64_FW
},
{ /* Northstar */
0xffff0000, 0x00330000, "Northstar",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power3,
COMMON_PPC64_FW
},
{ /* Pulsar */
0xffff0000, 0x00340000, "Pulsar",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power3,
COMMON_PPC64_FW
},
{ /* I-star */
0xffff0000, 0x00360000, "I-star",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power3,
COMMON_PPC64_FW
},
{ /* S-star */
0xffff0000, 0x00370000, "S-star",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power3,
COMMON_PPC64_FW
},
{ /* Power4 */
0xffff0000, 0x00350000, "Power4",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
CPU_FTR_PPCAS_ARCH_V2,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power4,
COMMON_PPC64_FW
},
{ /* Power4+ */
0xffff0000, 0x00380000, "Power4+",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
CPU_FTR_PPCAS_ARCH_V2,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power4,
COMMON_PPC64_FW
},
{ /* default match */
0x00000000, 0x00000000, "(Power4-Compatible)",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
CPU_FTR_PPCAS_ARCH_V2,
COMMON_USER_PPC64,
128, 128,
__setup_cpu_power4,
COMMON_PPC64_FW
}
};
firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
{FW_FEATURE_DABR, "hcall-dabr"},
{FW_FEATURE_COPY, "hcall-copy"},
{FW_FEATURE_ASR, "hcall-asr"},
{FW_FEATURE_DEBUG, "hcall-debug"},
{FW_FEATURE_PERF, "hcall-perf"},
{FW_FEATURE_DUMP, "hcall-dump"},
{FW_FEATURE_INTERRUPT, "hcall-interrupt"},
{FW_FEATURE_MIGRATE, "hcall-migrate"},
};
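cpu_specs[] is matched against the Processor Version Register: the first entry where (PVR & pvr_mask) == pvr_value wins, and the final all-zero mask/value entry matches any PVR, so the walk always terminates. The identify_cpu added to misc.S below does exactly this in assembly; a hedged C rendering, struct trimmed to the matching fields:

struct cpu_spec_sketch {
        unsigned int pvr_mask;
        unsigned int pvr_value;
};

struct cpu_spec_sketch *identify_cpu_sketch(struct cpu_spec_sketch *spec,
                                            unsigned int pvr)
{
        /* Table ends with { 0x00000000, 0x00000000 }: a guaranteed match. */
        while ((pvr & spec->pvr_mask) != spec->pvr_value)
                spec++;
        return spec;
}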
......@@ -34,6 +34,7 @@
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
......@@ -1267,6 +1268,11 @@ _GLOBAL(__start_initialization_iSeries)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
LOADADDR(r3,cpu_specs)
LOADADDR(r4,cur_cpu_spec)
li r5,0
bl .identify_cpu
LOADADDR(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
......@@ -1730,6 +1736,13 @@ _STATIC(start_here_pSeries)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
LOADADDR(r3,cpu_specs)
sub r3,r3,r26
LOADADDR(r4,cur_cpu_spec)
sub r4,r4,r26
mr r5,r26
bl .identify_cpu
/* set up the TOC (physical address) */
LOADADDR(r2,__toc_start)
addi r2,r2,0x4000
......@@ -1888,6 +1901,11 @@ _STATIC(start_here_common)
bl .start_kernel
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(__setup_cpu_power4)
blr
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
LOADADDR(r5, hmt_thread_data)
......
......@@ -46,6 +46,7 @@
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
/*
* Note: pte --> Linux PTE
......@@ -165,7 +166,8 @@ htab_initialize(void)
mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
/* XXX we currently map kernel text rw, should fix this */
if (cpu_has_largepage() && systemcfg->physicalMemorySize > 256*MB) {
if ((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
&& systemcfg->physicalMemorySize > 256*MB) {
create_pte_mapping((unsigned long)KERNELBASE,
KERNELBASE + 256*MB, mode_rw, 0);
create_pte_mapping((unsigned long)KERNELBASE + 256*MB,
......@@ -279,7 +281,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
#define PPC64_HWNOEXEC (1 << 2)
/* We do lazy icache flushing on cpus that support it */
if (unlikely(cpu_has_noexecute() && pfn_valid(pte_pfn(new_pte)))) {
if (unlikely((cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)
&& pfn_valid(pte_pfn(new_pte)))) {
struct page *page = pte_page(new_pte);
/* page is dirty */
......
......@@ -33,6 +33,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include "iSeries_setup.h"
......@@ -254,7 +255,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig
{
unsigned long i;
unsigned long mem_blocks = 0;
if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
else
mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
......@@ -311,7 +312,6 @@ iSeries_init_early(void)
ppc_md.setup_residual = iSeries_setup_residual;
ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
ppc_md.init_IRQ = iSeries_init_IRQ;
ppc_md.init_ras_IRQ = NULL;
ppc_md.get_irq = iSeries_get_irq;
ppc_md.init = NULL;
......
This source diff could not be displayed because it is too large.
......@@ -596,7 +596,6 @@ void __init init_IRQ(void)
once++;
ppc_md.init_IRQ();
if(ppc_md.init_ras_IRQ) ppc_md.init_ras_IRQ();
}
static struct proc_dir_entry * root_irq_dir;
......
......@@ -27,6 +27,7 @@
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>
.text
......@@ -444,6 +445,95 @@ _GLOBAL(cvt_df)
stfd 0,0(r5)
blr
/*
* identify_cpu,
* In: r3 = base of the cpu_specs array
* r4 = address of cur_cpu_spec
* r5 = relocation offset
*/
_GLOBAL(identify_cpu)
mfpvr r7
1:
lwz r8,CPU_SPEC_PVR_MASK(r3)
and r8,r8,r7
lwz r9,CPU_SPEC_PVR_VALUE(r3)
cmplw 0,r9,r8
beq 1f
addi r3,r3,CPU_SPEC_ENTRY_SIZE
b 1b
1:
add r3,r3,r5
std r3,0(r4)
blr
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
* and writes nop's over sections of code that don't apply for this cpu.
* r3 = data offset (not changed)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
LOADADDR(r6,cur_cpu_spec)
sub r6,r6,r3
ld r4,0(r6)
sub r4,r4,r3
ld r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
LOADADDR(r6,__start___ftr_fixup)
sub r6,r6,r3
LOADADDR(r7,__stop___ftr_fixup)
sub r7,r7,r3
/* Do the fixup */
1: cmpld r6,r7
bgelr
addi r6,r6,32
ld r8,-32(r6) /* mask */
and r8,r8,r4
ld r9,-24(r6) /* value */
cmpld r8,r9
beq 1b
ld r8,-16(r6) /* section begin */
ld r9,-8(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
sub r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
beq 2f
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
2: addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset
*
* Setup function is called with:
* r3 = data offset
* r4 = ptr to CPU spec (relocated)
*/
_GLOBAL(call_setup_cpu)
LOADADDR(r4, cur_cpu_spec)
sub r4,r4,r3
lwz r4,0(r4) # load pointer to cpu_spec
sub r4,r4,r3 # relocate
lwz r6,CPU_SPEC_SETUP(r4) # load function pointer
sub r6,r6,r3
mtctr r6
bctr
/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
......
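do_cpu_ftr_fixups above consumes the 32-byte __ftr_fixup records emitted by END_FTR_SECTION (see the new cputable.h below): {mask, value, begin, end}. Where (features & mask) != value, every instruction from begin to end is overwritten with a nop and flushed to the icache. A hedged C model of the loop, cache maintenance elided:

struct ftr_fixup {
        unsigned long mask, value;
        unsigned int *begin, *end;      /* range of 4-byte instructions */
};

void apply_ftr_fixups_sketch(struct ftr_fixup *f, struct ftr_fixup *stop,
                             unsigned long features)
{
        for (; f < stop; f++) {
                if ((features & f->mask) == f->value)
                        continue;               /* section applies; keep it */
                for (unsigned int *p = f->begin; p < f->end; p++)
                        *p = 0x60000000;        /* PowerPC nop (ori 0,0,0) */
                /* the real code follows with dcbst/sync/icbi/isync */
        }
}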
......@@ -376,15 +376,57 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0;
}
LIST_HEAD(module_bug_list);
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
char *secstrings;
unsigned int i;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
continue;
me->arch.bug_table = (void *) sechdrs[i].sh_addr;
me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
break;
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&me->arch.bug_list, &module_bug_list);
sort_ex_table((struct exception_table_entry *)me->extable,
(struct exception_table_entry *)me->extable +
me->num_exentries);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
list_del(&mod->arch.bug_list);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct mod_arch_specific *mod;
unsigned int i;
struct bug_entry *bug;
list_for_each_entry(mod, &module_bug_list, bug_list) {
bug = mod->bug_table;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug->bug_addr)
return bug;
}
return NULL;
}
......@@ -21,6 +21,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#define HPTE_LOCK_BIT 3
......@@ -217,7 +218,7 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
}
/* Ensure it is out of the tlb too */
if (cpu_has_tlbiel() && !large && local) {
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
_tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
......@@ -283,7 +284,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
}
/* Invalidate the tlb */
if (cpu_has_tlbiel() && !large && local) {
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
_tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
......@@ -346,7 +347,7 @@ static void pSeries_flush_hash_range(unsigned long context,
j++;
}
if (cpu_has_tlbiel() && !large && local) {
if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < j; i++) {
......
......@@ -45,6 +45,7 @@
#include <asm/machdep.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/hardirq.h>
#include <asm/cputable.h>
struct task_struct *last_task_used_math = NULL;
......@@ -412,7 +413,7 @@ void initialize_paca_hardware_interrupt_stack(void)
* __get_free_pages() might give us a page > KERNBASE+256M which
* is mapped with large ptes so we can't set up the guard page.
*/
if (cpu_has_largepage())
if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
return;
for (i=0; i < NR_CPUS; i++) {
......
......@@ -58,7 +58,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
struct pt_regs * regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
struct pt_regs * regs);
void init_ras_IRQ(void);
/* #define DEBUG */
......@@ -66,7 +65,8 @@ void init_ras_IRQ(void);
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
*/
void init_ras_IRQ(void) {
static int __init init_ras_IRQ(void)
{
struct device_node *np;
unsigned int *ireg, len, i;
......@@ -91,7 +91,10 @@ void init_ras_IRQ(void) {
ireg++;
}
}
return 1;
}
__initcall(init_ras_IRQ);
/*
* Handle power subsystem events (EPOW).
......
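With this hunk init_ras_IRQ no longer needs the ppc_md hook removed from machdep.h and irq.c above: __initcall drops a function pointer into a linker section that generic boot code walks in order. A hedged userspace analogue of the mechanism, assuming GCC and a GNU linker (which synthesizes __start_/__stop_ symbols for the section):

#include <stdio.h>

typedef int (*initcall_t)(void);

#define my_initcall(fn) \
        static initcall_t __attribute__((used, section("my_initcalls"))) \
        __init_##fn = fn

extern initcall_t __start_my_initcalls[], __stop_my_initcalls[];

static int init_ras_sketch(void)
{
        printf("RAS interrupt handlers registered\n");
        return 0;
}
my_initcall(init_ras_sketch);

int main(void)
{
        for (initcall_t *fn = __start_my_initcalls; fn < __stop_my_initcalls; fn++)
                (*fn)();
        return 0;
}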
......@@ -47,6 +47,7 @@
#include "open_pic.h"
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/cputable.h>
int smp_threads_ready;
unsigned long cache_decay_ticks;
......@@ -583,7 +584,7 @@ int __devinit __cpu_up(unsigned int cpu)
paca[cpu].prof_multiplier = 1;
paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
if (!cpu_has_slb()) {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
void *tmp;
/* maximum of 48 CPUs on machines with a segment table */
......
......@@ -21,6 +21,7 @@
#include <asm/paca.h>
#include <asm/naca.h>
#include <asm/pmc.h>
#include <asm/cputable.h>
int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid, int large,
......@@ -38,7 +39,7 @@ void stab_initialize(unsigned long stab)
esid = GET_ESID(KERNELBASE);
vsid = get_kernel_vsid(esid << SID_SHIFT);
if (cpu_has_slb()) {
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
/* Invalidate the entire SLB & all the ERATS */
#ifdef CONFIG_PPC_ISERIES
asm volatile("isync; slbia; isync":::"memory");
......@@ -222,7 +223,7 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large,
static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
int kernel_segment)
{
if (cpu_has_slb()) {
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
#ifndef CONFIG_PPC_ISERIES
if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
make_slbe(esid, vsid, 1, kernel_segment);
......@@ -275,7 +276,7 @@ int ste_allocate(unsigned long ea)
esid = GET_ESID(ea);
__ste_allocate(esid, vsid, kernel_segment);
if (!cpu_has_slb()) {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
/* Order update */
asm volatile("sync":::"memory");
}
......@@ -327,7 +328,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
}
}
if (!cpu_has_slb()) {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
/* Order update */
asm volatile("sync" : : : "memory");
}
......@@ -336,7 +337,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
/* Flush all user entries from the segment table of the current processor. */
void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
{
if (cpu_has_slb()) {
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
/*
* XXX disable 32bit slb invalidate optimisation until we fix
* the issue where a 32bit app execed out of a 64bit app can
......
......@@ -1136,7 +1136,11 @@ struct sysinfo32 {
u32 totalswap;
u32 freeswap;
unsigned short procs;
char _f[22];
unsigned short pad;
u32 totalhigh;
u32 freehigh;
u32 mem_unit;
char _f[20-2*sizeof(long)-sizeof(int)];
};
extern asmlinkage long sys_sysinfo(struct sysinfo *info);
......@@ -1145,11 +1149,30 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 *info)
{
struct sysinfo s;
int ret, err;
int bitcount=0;
mm_segment_t old_fs = get_fs ();
set_fs (KERNEL_DS);
ret = sys_sysinfo(&s);
set_fs (old_fs);
/* Check to see if any memory value is too large for 32-bit and
* scale down if needed.
*/
if ((s.totalram >> 32) || (s.totalswap >> 32)) {
while (s.mem_unit < PAGE_SIZE) {
s.mem_unit <<= 1;
bitcount++;
}
s.totalram >>=bitcount;
s.freeram >>= bitcount;
s.sharedram >>= bitcount;
s.bufferram >>= bitcount;
s.totalswap >>= bitcount;
s.freeswap >>= bitcount;
s.totalhigh >>= bitcount;
s.freehigh >>= bitcount;
}
err = put_user (s.uptime, &info->uptime);
err |= __put_user (s.loads[0], &info->loads[0]);
err |= __put_user (s.loads[1], &info->loads[1]);
......@@ -1161,6 +1184,9 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 *info)
err |= __put_user (s.totalswap, &info->totalswap);
err |= __put_user (s.freeswap, &info->freeswap);
err |= __put_user (s.procs, &info->procs);
err |= __put_user (s.totalhigh, &info->totalhigh);
err |= __put_user (s.freehigh, &info->freehigh);
err |= __put_user (s.mem_unit, &info->mem_unit);
if (err)
return -EFAULT;
......
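The sys32_sysinfo change handles machines whose RAM or swap no longer fits in 32 bits of 1-byte units: mem_unit is doubled until it reaches PAGE_SIZE and every count is shifted down by the same number of bits, so count * mem_unit is preserved. A hedged worked example of the scaling:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, mem_unit = 1;   /* bytes per unit */
        unsigned long long totalram = 6ULL << 30;       /* 6 GB in 1-byte units */
        int bitcount = 0;

        if (totalram >> 32) {                   /* would overflow a u32 */
                while (mem_unit < page_size) {
                        mem_unit <<= 1;
                        bitcount++;
                }
                totalram >>= bitcount;
        }
        /* 6 GB now fits: 1572864 units of 4096 bytes */
        printf("mem_unit=%lu totalram=%llu\n", mem_unit, totalram);
        return 0;
}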
......@@ -28,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
......@@ -306,6 +307,56 @@ static void parse_fpe(struct pt_regs *regs)
_exception(SIGFPE, &info, regs);
}
/*
* Look through the list of trap instructions that are used for BUG(),
* BUG_ON() and WARN_ON() and see if we hit one. At this point we know
* that the exception was caused by a trap instruction of some kind.
* Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
* otherwise.
*/
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
#ifndef CONFIG_MODULES
#define module_find_bug(x) NULL
#endif
static struct bug_entry *find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug->bug_addr)
return bug;
return module_find_bug(bugaddr);
}
int
check_bug_trap(struct pt_regs *regs)
{
struct bug_entry *bug;
unsigned long addr;
if (regs->msr & MSR_PR)
return 0; /* not in kernel */
addr = regs->nip; /* address of trap instruction */
if (addr < PAGE_OFFSET)
return 0;
bug = find_bug(regs->nip);
if (bug == NULL)
return 0;
if (bug->line & BUG_WARNING_TRAP) {
/* this is a WARN_ON rather than BUG/BUG_ON */
printk(KERN_ERR "Badness in %s at %s:%d\n",
bug->function, bug->file,
bug->line & ~BUG_WARNING_TRAP);
dump_stack();
return 1;
}
printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
bug->function, bug->file, bug->line);
return 0;
}
void
ProgramCheckException(struct pt_regs *regs)
{
......@@ -330,6 +381,10 @@ ProgramCheckException(struct pt_regs *regs)
if (debugger_bpt && debugger_bpt(regs))
return;
#endif
if (check_bug_trap(regs)) {
regs->nip += 4;
return;
}
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
......
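Net effect in traps.c: BUG_ON and WARN_ON now compile to a single conditional trap plus a __bug_table record, and check_bug_trap tells them apart by the BUG_WARNING_TRAP bit in the line field, advancing the NIP past the 4-byte trap only for warnings. A hedged usage fragment against the new asm-ppc64/bug.h macros (do_thing_sketch is hypothetical):

int do_thing_sketch(int arg)
{
        WARN_ON(arg < 0);       /* prints "Badness in ..." and continues */
        BUG_ON(arg == 42);      /* check_bug_trap returns 0, so the normal
                                   program-check (oops) path runs */
        return arg;
}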
......@@ -130,7 +130,7 @@ _GLOBAL(__copy_tofrom_user)
6: cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
blt cr1,.Ldo_tail
ble cr1,.Ldo_tail
34: ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
......
......@@ -200,6 +200,7 @@ _GLOBAL(__clear_user)
.llong 8b,92b
.text
/* r3 = dst, r4 = src, r5 = count */
_GLOBAL(__strncpy_from_user)
addi r6,r3,-1
addi r4,r4,-1
......@@ -222,14 +223,10 @@ _GLOBAL(__strncpy_from_user)
.llong 1b,99b
.text
/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
/* r3 = str, r4 = len (> 0) */
_GLOBAL(__strnlen_user)
addi r7,r3,-1
subf r6,r7,r5 /* top+1 - str */
cmplw 0,r4,r6
bge 0f
mr r6,r4
0: mtctr r6 /* ctr = min(len, top - str) */
mtctr r4 /* ctr = len */
1: lbzu r0,1(r7) /* get next byte */
cmpwi 0,r0,0
bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
......
......@@ -59,6 +59,7 @@
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
......@@ -512,7 +513,7 @@ void __init paging_init(void)
static struct kcore_list kcore_vmem;
static void setup_kcore(void)
static int __init setup_kcore(void)
{
int i;
......@@ -536,7 +537,10 @@ static void setup_kcore(void)
}
kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
return 0;
}
module_init(setup_kcore);
void initialize_paca_hardware_interrupt_stack(void);
......@@ -606,8 +610,6 @@ void __init mem_init(void)
#endif
mem_init_done = 1;
setup_kcore();
/* set the last page of each hardware interrupt stack to be protected */
initialize_paca_hardware_interrupt_stack();
......@@ -698,7 +700,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
int local = 0;
/* handle i-cache coherency */
if (!cpu_has_noexecute()) {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
......
......@@ -154,8 +154,28 @@ static int __init parse_numa_properties(void)
if (max_domain < numa_domain)
max_domain = numa_domain;
node_data[numa_domain].node_start_pfn = start / PAGE_SIZE;
node_data[numa_domain].node_size = size / PAGE_SIZE;
/*
* For backwards compatibility, OF splits the first node
* into two regions (the first being 0-4GB). Check for
* this simple case and complain if there is a gap in
* memory
*/
if (node_data[numa_domain].node_size) {
unsigned long shouldstart =
node_data[numa_domain].node_start_pfn +
node_data[numa_domain].node_size;
if (shouldstart != (start / PAGE_SIZE)) {
printk(KERN_ERR "Hole in node, disabling "
"region start %lx length %lx\n",
start, size);
continue;
}
node_data[numa_domain].node_size += size / PAGE_SIZE;
} else {
node_data[numa_domain].node_start_pfn =
start / PAGE_SIZE;
node_data[numa_domain].node_size = size / PAGE_SIZE;
}
for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
......@@ -174,6 +194,20 @@ static int __init parse_numa_properties(void)
return 0;
}
void setup_nonnuma(void)
{
unsigned long i;
for (i = 0; i < NR_CPUS; i++)
map_cpu_to_node(i, 0);
node_data[0].node_start_pfn = 0;
node_data[0].node_size = lmb_end_of_DRAM() / PAGE_SIZE;
for (i = 0 ; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
}
void __init do_init_bootmem(void)
{
int nid;
......@@ -181,9 +215,8 @@ void __init do_init_bootmem(void)
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
/* XXX FIXME: support machines without associativity information */
if (parse_numa_properties())
BUG();
setup_nonnuma();
for (nid = 0; nid < numnodes; nid++) {
unsigned long start_paddr, end_paddr;
......@@ -204,7 +237,7 @@ void __init do_init_bootmem(void)
NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
bootmap_pages = bootmem_bootmap_pages(end_paddr - start_paddr);
bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);
dbg("bootmap_pages = %lx\n", bootmap_pages);
bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
......
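parse_numa_properties now tolerates Open Firmware splitting a node's memory into back-to-back regions (historically 0-4GB plus the remainder): a region is merged into a node only when it starts exactly at the node's current end, in page-frame units. A hedged userspace sketch of that contiguity check (64-bit longs assumed):

#include <stdio.h>

#define PAGE_SZ 4096UL

struct node_sketch { unsigned long start_pfn, size_pfn; };

/* Merge byte region [start, start+size) into the node, or reject it
 * if that would leave a hole, mirroring the printk in the hunk. */
int add_region(struct node_sketch *n, unsigned long start, unsigned long size)
{
        if (n->size_pfn) {
                if (n->start_pfn + n->size_pfn != start / PAGE_SZ) {
                        printf("Hole in node, disabling region start %lx length %lx\n",
                               start, size);
                        return -1;
                }
                n->size_pfn += size / PAGE_SZ;
        } else {
                n->start_pfn = start / PAGE_SZ;
                n->size_pfn = size / PAGE_SZ;
        }
        return 0;
}

int main(void)
{
        struct node_sketch n = { 0, 0 };
        add_region(&n, 0, 4UL << 30);           /* OF's first split: 0-4GB */
        add_region(&n, 4UL << 30, 2UL << 30);   /* contiguous, so merged */
        printf("node: start_pfn=%lu size_pfn=%lu\n", n.start_pfn, n.size_pfn);
        return 0;
}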
......@@ -65,6 +65,14 @@ SECTIONS
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
__start___bug_table = .;
__bug_table : { *(__bug_table) }
__stop___bug_table = .;
__start___ftr_fixup = .;
__ftr_fixup : { *(__ftr_fixup) }
__stop___ftr_fixup = .;
. = ALIGN(16384); /* init_task */
.data.init_task : { *(.data.init_task) }
......
#ifndef _PPC64_BUG_H
#define _PPC64_BUG_H
#include <linux/config.h>
/*
* Define an illegal instr to trap on the bug.
* We don't use 0 because that marks the end of a function
......@@ -13,29 +11,48 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_XMON
struct pt_regs;
extern void xmon(struct pt_regs *excp);
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
xmon(0); \
} while (0)
#else
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
struct bug_entry {
unsigned long bug_addr;
long line;
const char *file;
const char *function;
};
/*
* If this bit is set in the line number it means that the trap
* is for WARN_ON rather than BUG or BUG_ON.
*/
#define BUG_WARNING_TRAP 0x1000000
#define BUG() do { \
__asm__ __volatile__( \
"1: twi 31,0,0\n" \
".section __bug_table,\"a\"\n\t" \
" .llong 1b,%0,%1,%2\n" \
".previous" \
: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#endif
#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
#define BUG_ON(x) do { \
__asm__ __volatile__( \
"1: tdnei %0,0\n" \
".section __bug_table,\"a\"\n\t" \
" .llong 1b,%1,%2,%3\n" \
".previous" \
: : "r" (x), "i" (__LINE__), "i" (__FILE__), \
"i" (__FUNCTION__)); \
} while (0)
#define PAGE_BUG(page) do { BUG(); } while (0)
#define WARN_ON(condition) do { \
if (unlikely((condition)!=0)) { \
printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
dump_stack(); \
} \
#define WARN_ON(x) do { \
__asm__ __volatile__( \
"1: tdnei %0,0\n" \
".section __bug_table,\"a\"\n\t" \
" .llong 1b,%1,%2,%3\n" \
".previous" \
: : "r" (x), "i" (__LINE__ + BUG_WARNING_TRAP), \
"i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#endif
......
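Each expansion above lays down four .llong words in __bug_table that overlay struct bug_entry exactly. A hedged illustration of the record a WARN_ON site produces (address, file, and function are hypothetical):

/* What ".llong 1b,%1,%2,%3" records for a WARN_ON at line 123: */
struct bug_entry warn_site_example = {
        .bug_addr = 0xc000000000010a4c,         /* label 1: the tdnei itself */
        .line     = 123 | BUG_WARNING_TRAP,     /* warning bit marks WARN_ON */
        .file     = "arch/ppc64/kernel/traps.c",
        .function = "do_thing_sketch",
};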
/*
* include/asm-ppc64/cputable.h
*
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_PPC_CPUTABLE_H
#define __ASM_PPC_CPUTABLE_H
/* Exposed to userland CPU features - Must match ppc32 definitions */
#define PPC_FEATURE_32 0x80000000
#define PPC_FEATURE_64 0x40000000
#define PPC_FEATURE_601_INSTR 0x20000000
#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
#define PPC_FEATURE_HAS_FPU 0x08000000
#define PPC_FEATURE_HAS_MMU 0x04000000
#define PPC_FEATURE_HAS_4xxMAC 0x02000000
#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/* This structure can grow; its real size is used by head.S code
* via the mkdefs mechanism.
*/
struct cpu_spec;
typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
struct cpu_spec {
/* CPU is matched via (PVR & pvr_mask) == pvr_value */
unsigned int pvr_mask;
unsigned int pvr_value;
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
/* cache line sizes */
unsigned int icache_bsize;
unsigned int dcache_bsize;
/* this is called to initialize various CPU bits like L1 cache,
* BHT, SPD, etc... from head.S before branching to identify_machine
*/
cpu_setup_t cpu_setup;
/* This is used to identify firmware features which are available
* to the kernel.
*/
unsigned long firmware_features;
};
extern struct cpu_spec cpu_specs[];
extern struct cpu_spec *cur_cpu_spec;
/* firmware feature bitmask values */
#define FIRMWARE_MAX_FEATURES 63
#define FW_FEATURE_PFT (1UL<<0)
#define FW_FEATURE_TCE (1UL<<1)
#define FW_FEATURE_SPRG0 (1UL<<2)
#define FW_FEATURE_DABR (1UL<<3)
#define FW_FEATURE_COPY (1UL<<4)
#define FW_FEATURE_ASR (1UL<<5)
#define FW_FEATURE_DEBUG (1UL<<6)
#define FW_FEATURE_PERF (1UL<<7)
#define FW_FEATURE_DUMP (1UL<<8)
#define FW_FEATURE_INTERRUPT (1UL<<9)
#define FW_FEATURE_MIGRATE (1UL<<10)
typedef struct {
unsigned long val;
char * name;
} firmware_feature_t;
extern firmware_feature_t firmware_features_table[];
#endif /* __ASSEMBLY__ */
/* CPU kernel features */
/* Retain the 32b definitions for the time being - use bottom half of word */
#define CPU_FTR_SPLIT_ID_CACHE 0x0000000000000001
#define CPU_FTR_L2CR 0x0000000000000002
#define CPU_FTR_SPEC7450 0x0000000000000004
#define CPU_FTR_ALTIVEC 0x0000000000000008
#define CPU_FTR_TAU 0x0000000000000010
#define CPU_FTR_CAN_DOZE 0x0000000000000020
#define CPU_FTR_USE_TB 0x0000000000000040
#define CPU_FTR_604_PERF_MON 0x0000000000000080
#define CPU_FTR_601 0x0000000000000100
#define CPU_FTR_HPTE_TABLE 0x0000000000000200
#define CPU_FTR_CAN_NAP 0x0000000000000400
#define CPU_FTR_L3CR 0x0000000000000800
#define CPU_FTR_L3_DISABLE_NAP 0x0000000000001000
#define CPU_FTR_NAP_DISABLE_L2_PR 0x0000000000002000
#define CPU_FTR_DUAL_PLL_750FX 0x0000000000004000
/* Add the 64b processor unique features in the top half of the word */
#define CPU_FTR_SLB 0x0000000100000000
#define CPU_FTR_16M_PAGE 0x0000000200000000
#define CPU_FTR_TLBIEL 0x0000000400000000
#define CPU_FTR_NOEXECUTE 0x0000000800000000
#define CPU_FTR_NODSISRALIGN 0x0000001000000000
/* Platform firmware features */
#define FW_FTR_ 0x0000000000000001
#ifndef __ASSEMBLY__
#define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_64 | \
PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU)
#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | CPU_FTR_16M_PAGE | \
CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
CPU_FTR_NODSISRALIGN)
#define COMMON_PPC64_FW (0)
#endif
#ifdef __ASSEMBLY__
#define BEGIN_FTR_SECTION 98:
#define END_FTR_SECTION(msk, val) \
99: \
.section __ftr_fixup,"a"; \
.align 3; \
.llong msk; \
.llong val; \
.llong 98b; \
.llong 99b; \
.previous
#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PPC_CPUTABLE_H */
#endif /* __KERNEL__ */
......@@ -67,7 +67,6 @@ struct machdep_calls {
void (*get_cpuinfo)(struct seq_file *m);
void (*init_IRQ)(void);
void (*init_ras_IRQ)(void);
int (*get_irq)(struct pt_regs *);
/* Optional, may be NULL. */
......
#ifndef _ASM_PPC64_MODULE_H
#define _ASM_PPC64_MODULE_H
#include <linux/list.h>
#include <asm/bug.h>
struct mod_arch_specific
{
/* Index of stubs section within module. */
......@@ -8,8 +11,15 @@ struct mod_arch_specific
/* What section is the TOC? */
unsigned int toc_section;
/* List of BUG addresses, source line numbers and filenames */
struct list_head bug_list;
struct bug_entry *bug_table;
unsigned int num_bugs;
};
extern struct bug_entry *module_find_bug(unsigned long bugaddr);
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
......
......@@ -730,18 +730,6 @@ static inline void prefetchw(const void *x)
#define spin_lock_prefetch(x) prefetchw(x)
#define cpu_has_largepage() (processor_type() == PV_POWER4 || \
processor_type() == PV_POWER4p)
#define cpu_has_slb() (processor_type() == PV_POWER4 || \
processor_type() == PV_POWER4p)
#define cpu_has_tlbiel() (processor_type() == PV_POWER4 || \
processor_type() == PV_POWER4p)
#define cpu_has_noexecute() (processor_type() == PV_POWER4 || \
processor_type() == PV_POWER4p)
/* XXX we have to call HV to set when in LPAR */
#define cpu_has_dabr() (1)
......
......@@ -24,8 +24,10 @@
* For historical reasons, these macros are grossly misnamed.
*/
#define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { 1 })
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(0UL)
#define USER_DS MAKE_MM_SEG(0xf000000000000000UL)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.fs)
......@@ -33,12 +35,25 @@
#define segment_eq(a,b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
/*
* Use the alpha trick for checking ranges:
*
* Is an address valid? This does a straightforward calculation rather
* than tests.
*
* Address valid if:
* - "addr" doesn't have any high-bits set
* - AND "size" doesn't have any high-bits set
* - AND "addr+size" doesn't have any high-bits set
* - OR we are in kernel mode.
*/
#define __access_ok(addr,size,segment) \
(((segment).seg & (addr | size | (addr+size))) == 0)
static inline int verify_area(int type, const void * addr, unsigned long size)
#define access_ok(type,addr,size) \
__access_ok(((unsigned long)(addr)),(size),get_fs())
static inline int verify_area(int type, const void *addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
}
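The alpha-style test works because USER_DS.seg is 0xf000000000000000: an address or size with any of those top bits set, or a sum that wraps into them, makes the AND nonzero, while KERNEL_DS.seg == 0 accepts everything. A hedged worked example:

#include <stdio.h>

#define SEG_USER 0xf000000000000000UL   /* USER_DS.seg from the hunk */

static int seg_ok(unsigned long seg, unsigned long addr, unsigned long size)
{
        return (seg & (addr | size | (addr + size))) == 0;
}

int main(void)
{
        printf("%d\n", seg_ok(SEG_USER, 0x10000000UL, 4096));       /* 1: user range */
        printf("%d\n", seg_ok(SEG_USER, 0xc000000000000000UL, 8));  /* 0: kernel address */
        printf("%d\n", seg_ok(0, 0xc000000000000000UL, 8));         /* 1: KERNEL_DS */
        return 0;
}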
......@@ -96,32 +111,32 @@ extern void sort_exception_table(void);
extern long __put_user_bad(void);
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
#define __put_user_check(x,ptr,size) \
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
long __pu_err; \
__put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \
__pu_err; \
})
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
case 1: __put_user_asm(x,ptr,retval,"stb"); break; \
case 2: __put_user_asm(x,ptr,retval,"sth"); break; \
case 4: __put_user_asm(x,ptr,retval,"stw"); break; \
case 8: __put_user_asm(x,ptr,retval,"std"); break; \
default: __put_user_bad(); \
} \
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
__pu_err; \
})
#define __put_user_size(x,ptr,size,retval,errret) \
do { \
retval = 0; \
switch (size) { \
case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \
case 2: __put_user_asm(x,ptr,retval,"sth",errret); break; \
case 4: __put_user_asm(x,ptr,retval,"stw",errret); break; \
case 8: __put_user_asm(x,ptr,retval,"std",errret); break; \
default: __put_user_bad(); \
} \
} while (0)
/*
......@@ -129,7 +144,7 @@ do { \
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*/
#define __put_user_asm(x, addr, err, op) \
#define __put_user_asm(x, addr, err, op, errret) \
__asm__ __volatile__( \
"1: "op" %1,0(%2) # put_user\n" \
"2:\n" \
......@@ -142,13 +157,13 @@ do { \
" .llong 1b,3b\n" \
".previous" \
: "=r"(err) \
: "r"(x), "b"(addr), "i"(-EFAULT), "0"(err))
: "r"(x), "b"(addr), "i"(errret), "0"(err))
#define __get_user_nocheck(x,ptr,size) \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
__get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
......@@ -158,26 +173,26 @@ do { \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x,ptr,retval,"lbz"); break; \
case 2: __get_user_asm(x,ptr,retval,"lhz"); break; \
case 4: __get_user_asm(x,ptr,retval,"lwz"); break; \
case 8: __get_user_asm(x,ptr,retval,"ld"); break; \
default: (x) = __get_user_bad(); \
} \
#define __get_user_size(x,ptr,size,retval,errret) \
do { \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \
case 2: __get_user_asm(x,ptr,retval,"lhz",errret); break; \
case 4: __get_user_asm(x,ptr,retval,"lwz",errret); break; \
case 8: __get_user_asm(x,ptr,retval,"ld",errret); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, op) \
#define __get_user_asm(x, addr, err, op, errret) \
__asm__ __volatile__( \
"1: "op" %1,0(%2) # get_user\n" \
"2:\n" \
......@@ -191,57 +206,97 @@ do { \
" .llong 1b,3b\n" \
".previous" \
: "=r"(err), "=r"(x) \
: "b"(addr), "i"(-EFAULT), "0"(err))
: "b"(addr), "i"(errret), "0"(err))
/* more complex routines */
extern unsigned long __copy_tofrom_user(void *to, const void *from, unsigned long size);
extern unsigned long __copy_tofrom_user(void *to, const void *from,
unsigned long size);
/* XXX should zero destination if fault happened */
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_READ, from, n))
return __copy_tofrom_user(to, from, n);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
memset(to + over, 0, over);
return __copy_tofrom_user(to, from, n - over) + over;
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
__get_user_size(*(u8 *)to, from, 1, ret, 1);
return ret;
case 2:
__get_user_size(*(u16 *)to, from, 2, ret, 2);
return ret;
case 4:
__get_user_size(*(u32 *)to, from, 4, ret, 4);
return ret;
case 8:
__get_user_size(*(u64 *)to, from, 8, ret, 8);
return ret;
}
}
return __copy_tofrom_user(to, from, n);
}
static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
__put_user_size(*(u8 *)from, (u8 *)to, 1, ret, 1);
return ret;
case 2:
__put_user_size(*(u16 *)from, (u16 *)to, 2, ret, 2);
return ret;
case 4:
__put_user_size(*(u32 *)from, (u32 *)to, 4, ret, 4);
return ret;
case 8:
__put_user_size(*(u64 *)from, (u64 *)to, 8, ret, 8);
return ret;
}
}
return __copy_tofrom_user(to, from, n);
}
#define __copy_in_user(to, from, size) \
__copy_tofrom_user((to), (from), (size))
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
if (likely(access_ok(VERIFY_READ, from, n)))
n = __copy_from_user(to, from, n);
return n;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n))
return __copy_tofrom_user(to, from, n);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
return __copy_tofrom_user(to, from, n - over) + over;
}
if (likely(access_ok(VERIFY_WRITE, to, n)))
n = __copy_to_user(to, from, n);
return n;
}
#define __copy_from_user(to, from, size) \
__copy_tofrom_user((to), (from), (size))
#define __copy_to_user(to, from, size) \
__copy_tofrom_user((to), (from), (size))
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long n)
{
if (likely(access_ok(VERIFY_READ, from, n) &&
access_ok(VERIFY_WRITE, to, n)))
n =__copy_tofrom_user(to, from, n);
return n;
}
extern unsigned long __clear_user(void *addr, unsigned long size);
static inline unsigned long
clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
unsigned long over = (unsigned long)addr + size - TASK_SIZE;
return __clear_user(addr, size - over) + over;
}
if (likely(access_ok(VERIFY_WRITE, addr, size)))
size = __clear_user(addr, size);
return size;
}
......@@ -250,7 +305,7 @@ extern int __strncpy_from_user(char *dst, const char *src, long count);
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
if (likely(access_ok(VERIFY_READ, src, 1)))
return __strncpy_from_user(dst, src, count);
return -EFAULT;
}
......@@ -260,24 +315,18 @@ strncpy_from_user(char *dst, const char *src, long count)
*
* Return 0 for error
*/
extern int __strnlen_user(const char *str, long len, unsigned long top);
extern int __strnlen_user(const char *str, long len);
/*
* Returns the length of the string at str (including the null byte),
* or 0 if we hit a page we can't access,
* or something > len if we didn't find a null byte.
*
* The `top' parameter to __strnlen_user is to make sure that
* we can never overflow from the user area into kernel space.
*/
static inline int strnlen_user(const char *str, long len)
{
unsigned long top = __kernel_ok? ~0UL: TASK_SIZE - 1;
if ((unsigned long)str > top)
return 0;
return __strnlen_user(str, len, top);
if (likely(access_ok(VERIFY_READ, str, 1)))
return __strnlen_user(str, len);
return 0;
}
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
......
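Two things change in this header: access faults can now report a residual byte count (errret) instead of a flat -EFAULT, and copy_from_user/copy_to_user gain a constant-size fast path that turns 1/2/4/8-byte copies into a single load or store with an exception-table entry. A hedged kernel-context fragment (read_word_sketch and user_ptr are hypothetical):

static long read_word_sketch(u32 __user *user_ptr, u32 *out)
{
        u32 val;
        /* sizeof(val) == 4 is a compile-time constant, so copy_from_user
         * reduces to one lwz via __get_user_size; on a fault the fast
         * path yields 4 (bytes not copied), which maps to -EFAULT here. */
        if (copy_from_user(&val, user_ptr, sizeof(val)))
                return -EFAULT;
        *out = val;
        return 0;
}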
......@@ -267,6 +267,104 @@
#ifndef __ASSEMBLY__
/* On powerpc a system call basically clobbers the same registers like a
* function call, with the exception of LR (which is needed for the
* "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
* an error return status).
*/
#define __syscall_nr(nr, type, name, args...) \
unsigned long __sc_ret, __sc_err; \
{ \
register unsigned long __sc_0 __asm__ ("r0"); \
register unsigned long __sc_3 __asm__ ("r3"); \
register unsigned long __sc_4 __asm__ ("r4"); \
register unsigned long __sc_5 __asm__ ("r5"); \
register unsigned long __sc_6 __asm__ ("r6"); \
register unsigned long __sc_7 __asm__ ("r7"); \
\
__sc_loadargs_##nr(name, args); \
__asm__ __volatile__ \
("sc \n\t" \
"mfcr %0 " \
: "=&r" (__sc_0), \
"=&r" (__sc_3), "=&r" (__sc_4), \
"=&r" (__sc_5), "=&r" (__sc_6), \
"=&r" (__sc_7) \
: __sc_asm_input_##nr \
: "cr0", "ctr", "memory", \
"r8", "r9", "r10","r11", "r12"); \
__sc_ret = __sc_3; \
__sc_err = __sc_0; \
} \
if (__sc_err & 0x10000000) \
{ \
errno = __sc_ret; \
__sc_ret = -1; \
} \
return (type) __sc_ret
#define __sc_loadargs_0(name, dummy...) \
__sc_0 = __NR_##name
#define __sc_loadargs_1(name, arg1) \
__sc_loadargs_0(name); \
__sc_3 = (unsigned long) (arg1)
#define __sc_loadargs_2(name, arg1, arg2) \
__sc_loadargs_1(name, arg1); \
__sc_4 = (unsigned long) (arg2)
#define __sc_loadargs_3(name, arg1, arg2, arg3) \
__sc_loadargs_2(name, arg1, arg2); \
__sc_5 = (unsigned long) (arg3)
#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
__sc_loadargs_3(name, arg1, arg2, arg3); \
__sc_6 = (unsigned long) (arg4)
#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
__sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
__sc_7 = (unsigned long) (arg5)
#define __sc_asm_input_0 "0" (__sc_0)
#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
#define _syscall0(type,name) \
type name(void) \
{ \
__syscall_nr(0, type, name); \
}
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
__syscall_nr(1, type, name, arg1); \
}
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1, type2 arg2) \
{ \
__syscall_nr(2, type, name, arg1, arg2); \
}
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1, type2 arg2, type3 arg3) \
{ \
__syscall_nr(3, type, name, arg1, arg2, arg3); \
}
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
__syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
}
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
{ \
__syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
}
#ifdef __KERNEL_SYSCALLS__
/*
......
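These _syscallN macros give __KERNEL_SYSCALLS__ users direct sc stubs: r0 carries the syscall number, r3-r7 the arguments, and CR0.SO flags an error, in which case r3 holds the errno value. A hedged fragment showing one expansion (errno here is a local stand-in for whatever the including file provides):

static int errno;       /* the macro body stores the error code here */

/* Expands to: int close(int fd) { r0 = __NR_close; r3 = fd; "sc";
 * if CR0.SO is set afterwards, errno = r3 and the result is -1. } */
_syscall1(int, close, int, fd)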