Commit 2549eb55 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/tmp3
parents 8a82602e 637823d2
......@@ -98,7 +98,7 @@ $(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE
$(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz
cp -f $(obj)/ramdisk.image.gz $@
$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz
$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz FORCE
touch $@
$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c FORCE
......
......@@ -596,7 +596,7 @@ DataAccess_common:
cmpi 0,r22,0xc
/* Segment fault on a bolted segment. Go off and map that segment. */
beq .do_stab_bolted
beq- .do_stab_bolted
stab_bolted_user_return:
EXCEPTION_PROLOG_COMMON
ld r3,_DSISR(r1)
......@@ -606,7 +606,7 @@ stab_bolted_user_return:
rlwinm r4,r3,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
ld r3,_DAR(r1) /* into the hash table */
beq 2f /* If so handle it */
beq+ 2f /* If so handle it */
li r4,0x300 /* Trap number */
bl .do_stab_SI
b 1f
......@@ -658,7 +658,7 @@ InstructionAccess_common:
EXCEPTION_PROLOG_COMMON
andis. r0,r23,0x0020 /* no ste found? */
beq 2f
beq+ 2f
mr r3,r22 /* SRR0 at interrupt */
li r4,0x400 /* Trap number */
bl .do_stab_SI
......@@ -688,7 +688,7 @@ InstructionAccessSLB_common:
li r4,0x480 /* Exception vector */
bl .ste_allocate
or. r3,r3,r3 /* Check return code */
beq fast_exception_return /* Return if we succeeded */
beq+ fast_exception_return /* Return if we succeeded */
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
......@@ -897,18 +897,8 @@ _GLOBAL(do_stab_bolted)
mfspr r22,DSISR
andis. r22,r22,0x0020
bne+ 2f
ld r22,8(r21) /* get SRR1 */
andi. r22,r22,MSR_PR /* check if from user */
bne+ stab_bolted_user_return /* from user, send the error on up */
#if 0
li r3,0
#ifdef CONFIG_XMON
bl .xmon
#endif
1: b 1b
#endif
2:
beq- stab_bolted_user_return
/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
mfspr r21,DAR
rldicl r20,r21,36,32 /* Permits a full 32b of ESID */
......@@ -1106,9 +1096,11 @@ SLB_NUM_ENTRIES = 64
oris r21,r23,2048 /* valid bit */
rldimi r21,r22,0,52 /* Insert entry */
isync
/*
* No need for an isync before or after this slbmte. The exception
* we enter with and the rfid we exit with are context synchronizing.
*/
slbmte r20,r21
isync
/* All done -- return from exception. */
mfsprg r20,3 /* Load the PACA pointer */
......@@ -1861,9 +1853,9 @@ _STATIC(start_here_common)
li r5,0
std r0,PACAKSAVE(r13)
/* ptr to hardware interrupt stack for processor 0 */
/* ptr to hardware interrupt stack for boot processor */
LOADADDR(r3, hardware_int_paca0)
li r5,0x1000
li r5,PAGE_SIZE
sldi r5,r5,3
subi r5,r5,STACK_FRAME_OVERHEAD
......@@ -1991,7 +1983,7 @@ ioremap_dir:
.globl hardware_int_paca0
hardware_int_paca0:
.space 8*4096
.space 8*PAGE_SIZE
/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
.globl stab_array
......
......@@ -864,7 +864,7 @@ static void iSeries_setup_dprofile(void)
{
if ( dprof_buffer ) {
unsigned i;
for (i=0; i<MAX_PACAS; ++i) {
for (i=0; i<NR_CPUS; ++i) {
paca[i].prof_shift = dprof_shift;
paca[i].prof_len = dprof_len-1;
paca[i].prof_buffer = dprof_buffer;
......
......@@ -381,22 +381,48 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
static inline void
handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
extern char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
static inline void handle_irq_event(int irq, struct pt_regs *regs,
struct irqaction *action)
{
int status = 0;
int retval = 0;
struct irqaction *first_action = action;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
do {
status |= action->flags;
action->handler(irq, action->dev_id, regs);
retval |= action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();
if (retval != 1) {
static int count = 100;
char name_buf[256];
if (count) {
count--;
if (retval) {
printk("irq event %d: bogus retval mask %x\n",
irq, retval);
} else {
printk("irq %d: nobody cared!\n", irq);
}
dump_stack();
printk("handlers:\n");
action = first_action;
do {
printk("[<%p>]", action->handler);
printk(" (%s)\n",
ppc_find_proc_name((unsigned *)action->handler, name_buf, 256));
action = action->next;
} while (action);
}
}
}
/*
......@@ -676,7 +702,7 @@ static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
#ifdef CONFIG_PPC_ISERIES
{
unsigned i;
for (i=0; i<MAX_PACAS; ++i) {
for (i=0; i<NR_CPUS; ++i) {
if ( paca[i].prof_buffer && (new_value & 1) )
paca[i].prof_enabled = 1;
else
......
......@@ -609,7 +609,7 @@ _GLOBAL(sys_call_table32)
.llong .sys_ni_syscall /* old profil syscall */
.llong .compat_sys_statfs
.llong .compat_sys_fstatfs /* 100 */
.llong .sys_ioperm
.llong .sys_ni_syscall /* old ioperm syscall */
.llong .compat_sys_socketcall
.llong .sys32_syslog
.llong .compat_sys_setitimer
......@@ -852,7 +852,7 @@ _GLOBAL(sys_call_table)
.llong .sys_ni_syscall /* old profil syscall holder */
.llong .sys_statfs
.llong .sys_fstatfs /* 100 */
.llong .sys_ioperm
.llong .sys_ni_syscall /* old ioperm syscall */
.llong .sys_socketcall
.llong .sys_syslog
.llong .sys_setitimer
......
......@@ -64,7 +64,7 @@ struct systemcfg *systemcfg;
(&paca[number].exception_stack[0]) - EXC_FRAME_SIZE, \
}
struct paca_struct paca[MAX_PACAS] __page_aligned = {
struct paca_struct paca[NR_CPUS] __page_aligned = {
#ifdef CONFIG_PPC_ISERIES
PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR),
#else
......@@ -101,6 +101,7 @@ struct paca_struct paca[MAX_PACAS] __page_aligned = {
PACAINITDATA(29, 0, 0, 0, 0),
PACAINITDATA(30, 0, 0, 0, 0),
PACAINITDATA(31, 0, 0, 0, 0),
#if NR_CPUS > 32
PACAINITDATA(32, 0, 0, 0, 0),
PACAINITDATA(33, 0, 0, 0, 0),
PACAINITDATA(34, 0, 0, 0, 0),
......@@ -116,5 +117,22 @@ struct paca_struct paca[MAX_PACAS] __page_aligned = {
PACAINITDATA(44, 0, 0, 0, 0),
PACAINITDATA(45, 0, 0, 0, 0),
PACAINITDATA(46, 0, 0, 0, 0),
PACAINITDATA(47, 0, 0, 0, 0)
PACAINITDATA(47, 0, 0, 0, 0),
PACAINITDATA(48, 0, 0, 0, 0),
PACAINITDATA(49, 0, 0, 0, 0),
PACAINITDATA(50, 0, 0, 0, 0),
PACAINITDATA(51, 0, 0, 0, 0),
PACAINITDATA(52, 0, 0, 0, 0),
PACAINITDATA(53, 0, 0, 0, 0),
PACAINITDATA(54, 0, 0, 0, 0),
PACAINITDATA(55, 0, 0, 0, 0),
PACAINITDATA(56, 0, 0, 0, 0),
PACAINITDATA(57, 0, 0, 0, 0),
PACAINITDATA(58, 0, 0, 0, 0),
PACAINITDATA(59, 0, 0, 0, 0),
PACAINITDATA(60, 0, 0, 0, 0),
PACAINITDATA(61, 0, 0, 0, 0),
PACAINITDATA(62, 0, 0, 0, 0),
PACAINITDATA(63, 0, 0, 0, 0),
#endif
};
......@@ -124,7 +124,7 @@ struct pci_dev *pci_find_dev_by_addr(unsigned long addr)
ioaddr = (addr > isa_io_base) ? addr - isa_io_base : 0;
pci_for_each_dev(dev) {
if ((dev->class >> 8) == PCI_BASE_CLASS_BRIDGE)
if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
continue;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
unsigned long start = pci_resource_start(dev,i);
......
......@@ -710,12 +710,6 @@ void create_tce_tables_for_busesLP(struct list_head *bus_list)
for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
bus = pci_bus_b(ln);
busdn = PCI_GET_DN(bus);
/* NOTE: there should never be a window declared on a bus when
* child devices also have a window. If this should ever be
* architected, we probably want children to have priority.
* In reality, the PHB containing ISA has the property, but otherwise
* it is the pci-bridges that have the property.
*/
dma_window = (u32 *)get_property(busdn, "ibm,dma-window", 0);
if (dma_window) {
/* Bussubno hasn't been copied yet.
......@@ -724,6 +718,7 @@ void create_tce_tables_for_busesLP(struct list_head *bus_list)
busdn->bussubno = bus->number;
create_pci_bus_tce_table((unsigned long)busdn);
}
/* look for a window on a bridge even if the PHB had one */
create_tce_tables_for_busesLP(&bus->children);
}
}
......
......@@ -67,18 +67,34 @@ enable_kernel_fp(void)
#endif /* CONFIG_SMP */
}
int
dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
#ifdef CONFIG_SMP
static void smp_unlazy_onefpu(void *arg)
{
/*
* XXX temporary workaround until threaded coredumps for ppc64
* are implemented - Anton
*/
struct pt_regs *regs = current->thread.regs;
if (!regs)
return 0;
return;
if (regs->msr & MSR_FP)
giveup_fpu(current);
memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs));
}
void dump_smp_unlazy_fpu(void)
{
smp_call_function(smp_unlazy_onefpu, NULL, 1, 1);
}
#endif
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
struct pt_regs *regs = tsk->thread.regs;
if (!regs)
return 0;
if (tsk == current && (regs->msr & MSR_FP))
giveup_fpu(current);
memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
return 1;
}
......@@ -113,7 +129,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
static void show_tsk_stack(struct task_struct *p, unsigned long sp);
static char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
void show_regs(struct pt_regs * regs)
{
......@@ -410,7 +426,7 @@ void initialize_paca_hardware_interrupt_stack(void)
extern char _stext[], _etext[], __init_begin[], __init_end[];
static char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen)
char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen)
{
unsigned long tb_flags;
unsigned short name_len;
......
......@@ -1134,7 +1134,7 @@ prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
_prom->cpu = (int)(unsigned long)getprop_rval;
_xPaca[_prom->cpu].active = 1;
#ifdef CONFIG_SMP
RELOC(cpu_online_map) = 1 << _prom->cpu;
RELOC(cpu_online_map) = 1UL << _prom->cpu;
#endif
RELOC(boot_cpuid) = _prom->cpu;
......
......@@ -194,7 +194,7 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
printk("naca->pftSize = 0x%lx\n", naca->pftSize);
printk("naca->debug_switch = 0x%lx\n", naca->debug_switch);
printk("naca->interrupt_controller = 0x%ld\n", naca->interrupt_controller);
printk("systemcf = 0x%p\n", systemcfg);
printk("systemcfg = 0x%p\n", systemcfg);
printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
printk("systemcfg->dCacheL1LineSize = 0x%x\n", systemcfg->dCacheL1LineSize);
......@@ -256,7 +256,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
return 0;
}
if (!(cpu_online_map & (1<<cpu_id)))
if (!(cpu_online_map & (1UL << cpu_id)))
return 0;
#ifdef CONFIG_SMP
......@@ -584,7 +584,7 @@ int set_spread_lpevents( char * str )
/* The parameter is the number of processors to share in processing lp events */
unsigned long i;
unsigned long val = simple_strtoul( str, NULL, 0 );
if ( ( val > 0 ) && ( val <= MAX_PACAS ) ) {
if ( ( val > 0 ) && ( val <= NR_CPUS ) ) {
for ( i=1; i<val; ++i )
paca[i].lpQueuePtr = paca[0].lpQueuePtr;
printk("lpevent processing spread over %ld processors\n", val);
......
......@@ -69,13 +69,11 @@ struct rt_sigframe {
};
extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int p6,
int p7, struct pt_regs *regs)
long sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4,
int p6, int p7, struct pt_regs *regs)
{
sigset_t saveset, newset;
......@@ -390,8 +388,6 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
siginfo_t info;
......
......@@ -563,10 +563,6 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 *act,
return ret;
}
extern long sys_rt_sigprocmask(int how, sigset_t *set,
sigset_t *oset, size_t sigsetsize);
/*
* Note: it is necessary to treat how as an unsigned int, with the
* corresponding cast to a signed int to insure that the proper
......@@ -613,10 +609,6 @@ long sys32_rt_sigprocmask(u32 how, compat_sigset_t *set,
return 0;
}
extern long sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
long sys32_rt_sigpending(compat_sigset_t *set, compat_size_t sigsetsize)
{
sigset_t s;
......@@ -683,11 +675,6 @@ static int copy_siginfo_to_user32(siginfo_t32 *d, siginfo_t *s)
return err;
}
extern long sys_rt_sigtimedwait(const sigset_t *uthese,
siginfo_t *uinfo, const struct timespec *uts,
size_t sigsetsize);
long sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo,
struct compat_timespec *uts, compat_size_t sigsetsize)
{
......@@ -758,9 +745,6 @@ static siginfo_t * siginfo32to64(siginfo_t *d, siginfo_t32 *s)
return d;
}
extern long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
/*
* Note: it is necessary to treat pid and sig as unsigned ints, with the
* corresponding cast to a signed int to insure that the proper conversion
......@@ -786,8 +770,6 @@ long sys32_rt_sigqueueinfo(u32 pid, u32 sig, siginfo_t32 *uinfo)
return ret;
}
extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
int sys32_rt_sigsuspend(compat_sigset_t* unewset, size_t sigsetsize, int p3,
int p4, int p6, int p7, struct pt_regs *regs)
{
......
......@@ -113,7 +113,7 @@ static int smp_iSeries_numProcs(void)
struct ItLpPaca * lpPaca;
np = 0;
for (i=0; i < MAX_PACAS; ++i) {
for (i=0; i < NR_CPUS; ++i) {
lpPaca = paca[i].xLpPacaPtr;
if ( lpPaca->xDynProcStatus < 2 ) {
++np;
......@@ -128,7 +128,7 @@ static int smp_iSeries_probe(void)
unsigned np = 0;
struct ItLpPaca *lpPaca;
for (i=0; i < MAX_PACAS; ++i) {
for (i=0; i < NR_CPUS; ++i) {
lpPaca = paca[i].xLpPacaPtr;
if (lpPaca->xDynProcStatus < 2) {
paca[i].active = 1;
......@@ -144,7 +144,7 @@ static void smp_iSeries_kick_cpu(int nr)
struct ItLpPaca * lpPaca;
/* Verify we have a Paca for processor nr */
if ( ( nr <= 0 ) ||
( nr >= MAX_PACAS ) )
( nr >= NR_CPUS ) )
return;
/* Verify that our partition has a processor nr */
lpPaca = paca[nr].xLpPacaPtr;
......@@ -228,7 +228,7 @@ smp_kick_cpu(int nr)
{
/* Verify we have a Paca for processor nr */
if ( ( nr <= 0 ) ||
( nr >= MAX_PACAS ) )
( nr >= NR_CPUS ) )
return;
/* The information for processor bringup must
......
......@@ -23,7 +23,8 @@
#include <asm/pmc.h>
int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid, int large);
void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment);
/*
* Build an entry for the base kernel segment and put it into
......@@ -45,7 +46,8 @@ void stab_initialize(unsigned long stab)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
make_slbe(esid, vsid, 0);
make_slbe(esid, vsid, 0, 1);
asm volatile("isync":::"memory");
#endif
} else {
asm volatile("isync; slbia; isync":::"memory");
......@@ -139,10 +141,13 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
/*
* Create a segment buffer entry for the given esid/vsid pair.
*
* NOTE: A context synchronising instruction is required before and after
* this, in the common case we use exception entry and rfid.
*/
void make_slbe(unsigned long esid, unsigned long vsid, int large)
void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment)
{
int kernel_segment = 0;
unsigned long entry, castout_entry;
union {
unsigned long word0;
......@@ -153,42 +158,15 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large)
slb_dword1 data;
} vsid_data;
if (REGION_ID(esid << SID_SHIFT) >= KERNEL_REGION_ID)
kernel_segment = 1;
/*
* Find an empty entry, if one exists.
* Find an empty entry, if one exists. Must start at 0 because
* we use this code to load SLB entry 0 at boot.
*/
for (entry = 0; entry < naca->slb_size; entry++) {
asm volatile("slbmfee %0,%1"
: "=r" (esid_data) : "r" (entry));
if (!esid_data.data.v) {
/*
* Write the new SLB entry.
*/
vsid_data.word0 = 0;
vsid_data.data.vsid = vsid;
vsid_data.data.kp = 1;
if (large)
vsid_data.data.l = 1;
if (kernel_segment)
vsid_data.data.c = 1;
esid_data.word0 = 0;
esid_data.data.esid = esid;
esid_data.data.v = 1;
esid_data.data.index = entry;
/* slbie not needed as no previous mapping existed. */
/* Order update */
asm volatile("isync" : : : "memory");
asm volatile("slbmte %0,%1"
: : "r" (vsid_data),
"r" (esid_data));
/* Order update */
asm volatile("isync" : : : "memory");
return;
}
if (!esid_data.data.v)
goto write_entry;
}
/*
......@@ -211,13 +189,13 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large)
if (castout_entry >= naca->slb_size)
castout_entry = 1;
asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
} while (esid_data.data.esid == GET_ESID((unsigned long)_get_SP()) &&
esid_data.data.v);
} while (esid_data.data.esid == GET_ESID((unsigned long)_get_SP()));
get_paca()->xStab_data.next_round_robin = castout_entry;
/* slbie not needed as the previous mapping is still valid. */
write_entry:
/*
* Write the new SLB entry.
*/
......@@ -234,10 +212,11 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large)
esid_data.data.v = 1;
esid_data.data.index = entry;
asm volatile("isync" : : : "memory"); /* Order update */
asm volatile("slbmte %0,%1"
: : "r" (vsid_data), "r" (esid_data));
asm volatile("isync" : : : "memory" ); /* Order update */
/*
* No need for an isync before or after this slbmte. The exception
* we enter with and the rfid we exit with are context synchronizing.
*/
asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
}
static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
......@@ -246,10 +225,10 @@ static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
if (cpu_has_slb()) {
#ifndef CONFIG_PPC_ISERIES
if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
make_slbe(esid, vsid, 1);
make_slbe(esid, vsid, 1, kernel_segment);
else
#endif
make_slbe(esid, vsid, 0);
make_slbe(esid, vsid, 0, kernel_segment);
} else {
unsigned char top_entry, stab_entry, *segments;
......
......@@ -50,11 +50,6 @@ check_bugs(void)
{
}
int sys_ioperm(unsigned long from, unsigned long num, int on)
{
return -EIO;
}
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
......
......@@ -349,9 +349,8 @@ ProgramCheckException(struct pt_regs *regs)
void
KernelFPUnavailableException(struct pt_regs *regs)
{
printk("Illegal floating point used in kernel "
"(task=0x%p, pc=0x%016lx, trap=0x%08lx)\n",
current, regs->nip, regs->trap);
printk("Illegal floating point used in kernel (task=0x%p, "
"pc=0x%016lx, trap=0x%lx)\n", current, regs->nip, regs->trap);
panic("Unrecoverable FP Unavailable Exception in Kernel");
}
......
......@@ -98,17 +98,38 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_ET_DYN_BASE (0x08000000)
/* Common routine for both 32-bit and 64-bit processes */
#define ELF_CORE_COPY_REGS(gregs, regs) ppc64_elf_core_copy_regs(gregs, regs);
static inline void
ppc64_elf_core_copy_regs(elf_gregset_t dstRegs, struct pt_regs* srcRegs)
static inline void ppc64_elf_core_copy_regs(elf_gregset_t elf_regs,
struct pt_regs *regs)
{
int i;
int gprs = sizeof(struct pt_regs)/sizeof(elf_greg_t64);
int numGPRS = ((sizeof(struct pt_regs)/sizeof(elf_greg_t64)) < ELF_NGREG) ? (sizeof(struct pt_regs)/sizeof(elf_greg_t64)) : ELF_NGREG;
if (gprs > ELF_NGREG)
gprs = ELF_NGREG;
for (i=0; i < numGPRS; i++)
dstRegs[i] = (elf_greg_t)((elf_greg_t64 *)srcRegs)[i];
for (i=0; i < gprs; i++)
elf_regs[i] = (elf_greg_t)((elf_greg_t64 *)regs)[i];
}
#define ELF_CORE_COPY_REGS(gregs, regs) ppc64_elf_core_copy_regs(gregs, regs);
static inline int dump_task_regs(struct task_struct *tsk,
elf_gregset_t *elf_regs)
{
struct pt_regs *regs = tsk->thread.regs;
if (regs)
ppc64_elf_core_copy_regs(*elf_regs, regs);
return 1;
}
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
#ifdef CONFIG_SMP
extern void dump_smp_unlazy_fpu(void);
#define ELF_CORE_SYNC dump_smp_unlazy_fpu
#endif
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
......
......@@ -33,17 +33,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
/* A paca entry is required for each logical processor. On systems
* that support hardware multi-threading, this is equal to twice the
* number of physical processors. On LPAR systems, we are required
* to have space for the maximum number of logical processors we
* could ever possibly have. Currently, we are limited to allocating
* 24 processors to a partition which gives 48 logical processors on
* an HMT box. Therefore, we reserve this many paca entries.
*/
#define MAX_PROCESSORS 24
#define MAX_PACAS MAX_PROCESSORS * 2
extern struct paca_struct paca[];
register struct paca_struct *local_paca asm("r13");
#define get_paca() local_paca
......
......@@ -77,7 +77,7 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev);
static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
{
struct device_node *dn = (struct device_node *)(dev->sysdata);
if (dn->devfn == dev->devfn && dn->busno == dev->bus->number)
if (dn->devfn == dev->devfn && dn->busno == (dev->bus->number&0xff))
return dn; /* fast path. sysdata is good */
else
return fetch_dev_dn(dev);
......
......@@ -2,6 +2,7 @@
#define _ASMPPC64_SIGNAL_H
#include <linux/types.h>
#include <asm/siginfo.h>
/* Avoid too many header ordering problems. */
struct siginfo;
......@@ -72,19 +73,19 @@ typedef struct {
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOCLDSTOP 0x00000001u
#define SA_NOCLDWAIT 0x00000002u
#define SA_SIGINFO 0x00000004u
#define SA_ONSTACK 0x08000000u
#define SA_RESTART 0x10000000u
#define SA_NODEFER 0x40000000u
#define SA_RESETHAND 0x80000000u
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
#define SA_INTERRUPT 0x20000000u /* dummy -- ignored */
#define SA_RESTORER 0x04000000
#define SA_RESTORER 0x04000000u
/*
* sigaltstack controls
......@@ -143,6 +144,16 @@ typedef struct sigaltstack {
size_t ss_size;
} stack_t;
struct pt_regs;
struct timespec;
extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
extern long sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize);
extern long sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
extern long sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
const struct timespec *uts, size_t sigsetsize);
extern long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
struct pt_regs;
......
......@@ -83,6 +83,7 @@ extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
......
......@@ -206,6 +206,7 @@ copy_from_user(void *to, const void *from, unsigned long n)
return __copy_tofrom_user(to, from, n);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
memset(to + over, 0, over);
return __copy_tofrom_user(to, from, n - over) + over;
}
return n;
......