Commit d27b54cc authored by Linus Torvalds

Merge http://lia64.bkbits.net/linux-ia64-release-2.6.11

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 427b96f1 6eb0a291
......@@ -158,14 +158,6 @@ config IA64_BRL_EMU
depends on ITANIUM
default y
config ITANIUM_BSTEP_SPECIFIC
bool "Itanium B-step specific code"
depends on ITANIUM
help
Select this option to build a kernel for an Itanium prototype system
with a B-step CPU. You have a B-step CPU if the "revision" field in
/proc/cpuinfo has a value in the range from 1 to 4.
# align cache-sensitive data to 128 bytes
config IA64_L1_CACHE_SHIFT
int
......
......@@ -46,8 +46,6 @@ ifeq ($(GCC_VERSION),0304)
cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
endif
cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC) += -mb-step
CFLAGS += $(cflags-y)
head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
......
......@@ -73,7 +73,6 @@ CONFIG_ITANIUM=y
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
CONFIG_IA64_BRL_EMU=y
# CONFIG_ITANIUM_BSTEP_SPECIFIC is not set
CONFIG_IA64_L1_CACHE_SHIFT=6
# CONFIG_NUMA is not set
# CONFIG_VIRTUAL_MEM_MAP is not set
......
/*
* IA32 Architecture-specific signal handling support.
*
* Copyright (C) 1999, 2001-2002 Hewlett-Packard Co
* Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2000 VA Linux Co
......@@ -970,11 +970,10 @@ ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
}
asmlinkage long
sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7,
unsigned long stack)
sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
int arg6, int arg7, struct pt_regs regs)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long esp = (unsigned int) regs->r12;
unsigned long esp = (unsigned int) regs.r12;
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8);
sigset_t set;
int eax;
......@@ -993,7 +992,7 @@ sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext_ia32(regs, &frame->sc, &eax))
if (restore_sigcontext_ia32(&regs, &frame->sc, &eax))
goto badframe;
return eax;
......@@ -1003,11 +1002,10 @@ sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int
}
asmlinkage long
sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7,
unsigned long stack)
sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4,
int arg5, int arg6, int arg7, struct pt_regs regs)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long esp = (unsigned int) regs->r12;
unsigned long esp = (unsigned int) regs.r12;
struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4);
sigset_t set;
int eax;
......@@ -1023,7 +1021,7 @@ sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext_ia32(regs, &frame->uc.uc_mcontext, &eax))
if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax))
goto badframe;
/* It is more difficult to avoid calling this function than to
......
......@@ -6,7 +6,7 @@
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000-2003 Hewlett-Packard Co
* Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
*
......@@ -1436,7 +1436,7 @@ sys32_waitpid (int pid, unsigned int *stat_addr, int options)
}
static unsigned int
ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val)
ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
{
size_t copied;
unsigned int ret;
......@@ -1446,7 +1446,7 @@ ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr,
}
static unsigned int
ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val)
ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
{
if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
......@@ -1751,25 +1751,16 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
return 0;
}
/*
* Note that the IA32 version of `ptrace' calls the IA64 routine for
* many of the requests. This will only work for requests that do
* not need access to the calling processes `pt_regs' which is located
* at the address of `stack'. Once we call the IA64 `sys_ptrace' then
* the address of `stack' will not be the address of the `pt_regs'.
*/
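For context, a sketch of the calling-convention change this series makes (illustrative only; sys_foo is a hypothetical handler, not patch code). On ia64 the syscall stubs lay pt_regs out on the kernel stack directly above the eight argument slots, which is exactly what the deleted comment above relies on:

	/* Old idiom (removed here): a dummy ninth argument whose address
	 * happens to coincide with the saved register frame. */
	asmlinkage long
	sys_foo (long arg0, long arg1, long arg2, long arg3,
		 long arg4, long arg5, long arg6, long arg7, long stack)
	{
		struct pt_regs *regs = (struct pt_regs *) &stack;
		return regs->r12;
	}

	/* New idiom: declare the ninth slot as the struct itself, so
	 * "regs" names the saved frame and "&regs" replaces the old
	 * pointer.  Handlers that don't need the frame at syscall entry
	 * (sys32_ptrace, sys_perfmonctl, sys_pipe) instead drop the dummy
	 * arguments entirely and use ia64_task_regs(current) where needed. */
	asmlinkage long
	sys_foo (long arg0, long arg1, long arg2, long arg3,
		 long arg4, long arg5, long arg6, long arg7,
		 struct pt_regs regs)
	{
		return regs.r12;
	}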
asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
long arg4, long arg5, long arg6, long arg7, long stack)
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
struct task_struct *child;
unsigned int value, tmp;
long i, ret;
lock_kernel();
if (request == PTRACE_TRACEME) {
ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
ret = sys_ptrace(request, pid, addr, data);
goto out;
}
......@@ -1786,7 +1777,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
goto out_tsk;
if (request == PTRACE_ATTACH) {
ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
ret = sys_ptrace(request, pid, addr, data);
goto out_tsk;
}
......@@ -1797,7 +1788,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: /* read word at location addr */
ret = ia32_peek(regs, child, addr, &value);
ret = ia32_peek(child, addr, &value);
if (ret == 0)
ret = put_user(value, (unsigned int __user *) compat_ptr(data));
else
......@@ -1806,7 +1797,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */
ret = ia32_poke(regs, child, addr, data);
ret = ia32_poke(child, addr, data);
goto out_tsk;
case PTRACE_PEEKUSR: /* read word at addr in USER area */
......@@ -1882,7 +1873,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
case PTRACE_KILL:
case PTRACE_SINGLESTEP: /* execute child for one instruction */
case PTRACE_DETACH: /* detach a process */
ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
ret = sys_ptrace(request, pid, addr, data);
break;
default:
......@@ -1905,9 +1896,9 @@ typedef struct {
asmlinkage long
sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, long stack)
long arg2, long arg3, long arg4, long arg5, long arg6,
long arg7, struct pt_regs pt)
{
struct pt_regs *pt = (struct pt_regs *) &stack;
stack_t uss, uoss;
ia32_stack_t buf32;
int ret;
......@@ -1928,7 +1919,7 @@ sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
}
set_fs(KERNEL_DS);
ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
(stack_t __user *) &uoss, pt->r12);
(stack_t __user *) &uoss, pt.r12);
current->sas_ss_size = buf32.ss_size;
set_fs(old_fs);
out:
......
......@@ -193,9 +193,17 @@ void foo(void)
DEFINE(IA64_CLONE_VM, CLONE_VM);
BLANK();
DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET,
offsetof (struct cpuinfo_ia64, nsec_per_cyc));
DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
offsetof (struct cpuinfo_ia64, ptce_base));
DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
offsetof (struct cpuinfo_ia64, ptce_count));
DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
offsetof (struct cpuinfo_ia64, ptce_stride));
BLANK();
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
offsetof (struct timespec, tv_nsec));
DEFINE(CLONE_SETTLS_BIT, 19);
#if CLONE_SETTLS != (1<<19)
......@@ -203,19 +211,16 @@ void foo(void)
#endif
BLANK();
/* used by arch/ia64/kernel/mca_asm.S */
DEFINE(IA64_CPUINFO_PERCPU_PADDR, offsetof (struct cpuinfo_ia64, percpu_paddr));
DEFINE(IA64_CPUINFO_PAL_PADDR, offsetof (struct cpuinfo_ia64, pal_paddr));
DEFINE(IA64_CPUINFO_PA_MCA_INFO, offsetof (struct cpuinfo_ia64, ia64_pa_mca_data));
DEFINE(IA64_MCA_PROC_STATE_DUMP, offsetof (struct ia64_mca_cpu_s, ia64_mca_proc_state_dump));
DEFINE(IA64_MCA_STACK, offsetof (struct ia64_mca_cpu_s, ia64_mca_stack));
DEFINE(IA64_MCA_STACKFRAME, offsetof (struct ia64_mca_cpu_s, ia64_mca_stackframe));
DEFINE(IA64_MCA_BSPSTORE, offsetof (struct ia64_mca_cpu_s, ia64_mca_bspstore));
DEFINE(IA64_INIT_STACK, offsetof (struct ia64_mca_cpu_s, ia64_init_stack));
/* used by head.S */
DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
offsetof (struct ia64_mca_cpu, proc_state_dump));
DEFINE(IA64_MCA_CPU_STACK_OFFSET,
offsetof (struct ia64_mca_cpu, stack));
DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
offsetof (struct ia64_mca_cpu, stackframe));
DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
offsetof (struct ia64_mca_cpu, rbstore));
DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
offsetof (struct ia64_mca_cpu, init_stack));
BLANK();
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
......
......@@ -415,8 +415,8 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
* Abstraction Layer chapter 11 in ADAG
*/
static efi_memory_desc_t *
pal_code_memdesc (void)
void *
efi_get_pal_addr (void)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
......@@ -474,51 +474,31 @@ pal_code_memdesc (void)
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
return md;
return __va(md->phys_addr);
}
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
__FUNCTION__);
return NULL;
}
void
efi_get_pal_addr (void)
{
efi_memory_desc_t *md = pal_code_memdesc();
u64 vaddr, mask;
struct cpuinfo_ia64 *cpuinfo;
if (md != NULL) {
vaddr = PAGE_OFFSET + md->phys_addr;
mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
cpuinfo = (struct cpuinfo_ia64 *)__va(ia64_get_kr(IA64_KR_PA_CPU_INFO));
cpuinfo->pal_base = vaddr & mask;
cpuinfo->pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
}
}
void
efi_map_pal_code (void)
{
efi_memory_desc_t *md = pal_code_memdesc();
u64 vaddr, mask, psr;
if (md != NULL) {
void *pal_vaddr = efi_get_pal_addr ();
u64 psr;
vaddr = PAGE_OFFSET + md->phys_addr;
mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
if (!pal_vaddr)
return;
/*
* Cannot write to CRx with PSR.ic=1
*/
psr = ia64_clear_ic();
ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
IA64_GRANULE_SHIFT);
ia64_set_psr(psr); /* restore psr */
ia64_srlz_i();
}
/*
* Cannot write to CRx with PSR.ic=1
*/
psr = ia64_clear_ic();
ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
IA64_GRANULE_SHIFT);
ia64_set_psr(psr); /* restore psr */
ia64_srlz_i();
}
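GRANULEROUNDDOWN is defined elsewhere in the tree; assuming the usual asm/meminit.h definition, it simply masks an address down to the base of its IA64_GRANULE_SIZE-aligned granule, which is the alignment the translation-register insertion above requires:

	/* Assumed definition, shown for reference only: */
	#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE - 1))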
void __init
......
......@@ -558,7 +558,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3: br.cond.sptk ia64_leave_syscall
.ret3: br.cond.sptk .work_pending_syscall_end
strace_error:
ld8 r3=[r2] // load pt_regs.r8
......@@ -621,10 +621,7 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
PT_REGS_UNWIND_INFO(0)
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
.mem.offset 0,0; (p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0; (p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
mov r10=r0 // clear error indication in r10
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_syscall)
// fall through
......@@ -709,27 +706,23 @@ ENTRY(ia64_leave_syscall)
ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
mov b7=r0 // clear b7
;;
ld8 r23=[r3],PT(R9)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
ld8 r18=[r2],PT(R8)-PT(B6) // load b6
ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
ld8 r18=[r2],PT(R9)-PT(B6) // load b6
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
mov r16=ar.bsp // M2 get existing backing store pointer
(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
(p6) br.cond.spnt .work_pending
(p6) br.cond.spnt .work_pending_syscall
;;
// start restoring the state saved on the kernel stack (struct pt_regs):
ld8.fill r8=[r2],16
ld8.fill r9=[r3],16
ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
ld8 r11=[r3],PT(CR_IIP)-PT(R11)
mov f6=f0 // clear f6
;;
invala // M0|1 invalidate ALAT
rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection
mov f9=f0 // clear f9
ld8.fill r10=[r2],16
ld8.fill r11=[r3],16
mov f7=f0 // clear f7
;;
ld8 r29=[r2],16 // load cr.ipsr
ld8 r28=[r3],16 // load cr.iip
mov f8=f0 // clear f8
......@@ -760,7 +753,7 @@ ENTRY(ia64_leave_syscall)
;;
srlz.d // M0 ensure interruption collection is off
ld8.fill r13=[r3],16
nop.i 0
mov f7=f0 // clear f7
;;
ld8.fill r12=[r2] // restore r12 (sp)
ld8.fill r15=[r3] // restore r15
......@@ -770,8 +763,8 @@ ENTRY(ia64_leave_syscall)
(pUStk) st1 [r14]=r17
mov b6=r18 // I0 restore b6
;;
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
mov r14=r0 // clear r14
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
(pKStk) br.cond.dpnt.many skip_rbs_switch
mov.m ar.ccv=r0 // clear ar.ccv
......@@ -987,7 +980,7 @@ dont_preserve_current_frame:
shladd in0=loc1,3,r17
mov in1=0
;;
.align 32
TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
// cycle 0
......@@ -1083,6 +1076,12 @@ skip_rbs_switch:
* On exit:
* p6 = TRUE if work-pending-check needs to be redone
*/
.work_pending_syscall:
add r2=-8,r2
add r3=-8,r3
;;
st8 [r2]=r8
st8 [r3]=r10
.work_pending:
tbit.nz p6,p0=r31,TIF_SIGDELAYED // signal delayed from MCA/INIT/NMI/PMI context?
(p6) br.cond.sptk.few .sigdelayed
......@@ -1104,13 +1103,13 @@ skip_rbs_switch:
;;
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.many .work_processed_syscall // re-check
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
br.cond.sptk.many .work_processed_kernel // re-check
.notify:
(pUStk) br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
(pLvSys)br.cond.sptk.many .work_processed_syscall // don't re-check
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
br.cond.sptk.many .work_processed_kernel // don't re-check
// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
......@@ -1121,9 +1120,17 @@ skip_rbs_switch:
.sigdelayed:
br.call.sptk.many rp=do_sigdelayed
cmp.eq p6,p0=r0,r0 // p6 <- 1, always re-check
(pLvSys)br.cond.sptk.many .work_processed_syscall // re-check
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
br.cond.sptk.many .work_processed_kernel // re-check
.work_pending_syscall_end:
adds r2=PT(R8)+16,r12
adds r3=PT(R10)+16,r12
;;
ld8 r8=[r2]
ld8 r10=[r3]
br.cond.sptk.many .work_processed_syscall // re-check
END(ia64_leave_kernel)
ENTRY(handle_syscall_error)
......@@ -1135,17 +1142,11 @@ ENTRY(handle_syscall_error)
*/
PT_REGS_UNWIND_INFO(0)
ld8 r3=[r2] // load pt_regs.r8
sub r9=0,r8 // negate return value to get errno
;;
mov r10=-1 // return -1 in pt_regs.r10 to indicate error
cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0?
adds r3=16,r2 // r3=&pt_regs.r10
;;
(p6) mov r9=r8
(p6) mov r10=0
;;
.mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
(p7) mov r10=-1
(p7) sub r8=0,r8 // negate return value to get errno
br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
......
......@@ -5,7 +5,7 @@
* to set up the kernel's global pointer and jump to the kernel
* entry point.
*
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
......@@ -232,21 +232,6 @@ start_ap:
;;
(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
#ifdef CONFIG_IA64_EARLY_PRINTK
.rodata
alive_msg:
stringz "I'm alive and well\n"
alive_msg_end:
.previous
alloc r2=ar.pfs,0,0,2,0
movl out0=alive_msg
movl out1=alive_msg_end-alive_msg-1
;;
br.call.sptk.many rp=early_printk
1: // force new bundle
#endif /* CONFIG_IA64_EARLY_PRINTK */
#ifdef CONFIG_SMP
(isAP) br.call.sptk.many rp=start_secondary
.ret0:
......@@ -267,7 +252,9 @@ alive_msg_end:
;;
ld8 out0=[r3]
br.call.sptk.many b0=console_print
self: br.sptk.many self // endless loop
self: hint @pause
br.sptk.many self // endless loop
END(_start)
GLOBAL_ENTRY(ia64_save_debug_regs)
......
......@@ -548,7 +548,7 @@ ENTRY(dirty_bit)
#endif
mov pr=r31,-1 // restore pr
rfi
END(idirty_bit)
END(dirty_bit)
.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
......
......@@ -67,6 +67,7 @@
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
......@@ -86,6 +87,12 @@
ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
u64 ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
unsigned long __per_cpu_mca[NR_CPUS];
/* In mca_asm.S */
extern void ia64_monarch_init_handler (void);
......@@ -1195,6 +1202,53 @@ static struct irqaction mca_cpep_irqaction = {
};
#endif /* CONFIG_ACPI */
/* Do per-CPU MCA-related initialization. */
void __devinit
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
if (smp_processor_id() == 0) {
void *mca_data;
int cpu;
mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
* NR_CPUS);
for (cpu = 0; cpu < NR_CPUS; cpu++) {
__per_cpu_mca[cpu] = __pa(mca_data);
mca_data += sizeof(struct ia64_mca_cpu);
}
}
/*
* The MCA info structure was allocated earlier and its
* physical address saved in __per_cpu_mca[cpu]. Copy that
* address to ia64_mca_data so we can access it as a per-CPU
* variable.
*/
__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
/*
* Stash away a copy of the PTE needed to map the per-CPU page.
* We may need it during MCA recovery.
*/
__get_cpu_var(ia64_mca_per_cpu_pte) =
pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
/*
* Also, stash away a copy of the PAL address and the PTE
* needed to map it.
*/
pal_vaddr = efi_get_pal_addr();
if (!pal_vaddr)
return;
__get_cpu_var(ia64_mca_pal_base) =
GRANULEROUNDDOWN((unsigned long) pal_vaddr);
__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
PAGE_KERNEL));
}
/*
* ia64_mca_init
*
......
......@@ -144,24 +144,26 @@ ia64_os_mca_done_dump:
// The following code purges TC and TR entries. Then reload all TC entries.
// Purge percpu data TC entries.
begin_tlb_purge_and_reload:
GET_PERCPU_PADDR(r2) // paddr of percpu_paddr in cpuinfo struct
;;
mov r17=r2
;;
adds r17=8,r17
#define O(member) IA64_CPUINFO_##member##_OFFSET
GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
;;
ld8 r18=[r17],8 // r18=ptce_base
;;
ld4 r19=[r17],4 // r19=ptce_count[0]
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
;;
ld4 r20=[r17],4 // r20=ptce_count[1]
ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
ld4 r19=[r2],4 // r19=ptce_count[0]
ld4 r21=[r17],4 // r21=ptce_stride[0]
;;
ld4 r21=[r17],4 // r21=ptce_stride[0]
ld4 r20=[r2] // r20=ptce_count[1]
ld4 r22=[r17] // r22=ptce_stride[1]
mov r24=0
;;
ld4 r22=[r17],4 // r22=ptce_stride[1]
adds r20=-1,r20
;;
#undef O
2:
cmp.ltu p6,p7=r24,r19
(p7) br.cond.dpnt.few 4f
......@@ -201,9 +203,9 @@ begin_tlb_purge_and_reload:
srlz.d
;;
// 3. Purge ITR for PAL code.
adds r17=40,r23
GET_THIS_PADDR(r2, ia64_mca_pal_base)
;;
ld8 r16=[r17]
ld8 r16=[r2]
mov r18=IA64_GRANULE_SHIFT<<2
;;
ptr.i r16,r18
......@@ -246,16 +248,15 @@ begin_tlb_purge_and_reload:
srlz.d
;;
// 2. Reload DTR register for PERCPU data.
GET_PERCPU_PADDR(r2) // paddr of percpu_paddr in cpuinfo struct
GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
;;
mov r17=r2
movl r16=PERCPU_ADDR // vaddr
movl r18=PERCPU_PAGE_SHIFT<<2
;;
mov cr.itir=r18
mov cr.ifa=r16
;;
ld8 r18=[r17] // pte
ld8 r18=[r2] // load per-CPU PTE
mov r16=IA64_TR_PERCPU_DATA;
;;
itr.d dtr[r16]=r18
......@@ -263,13 +264,13 @@ begin_tlb_purge_and_reload:
srlz.d
;;
// 3. Reload ITR for PAL code.
GET_CPUINFO_PAL_PADDR(r2) // paddr of pal_paddr in cpuinfo struct
GET_THIS_PADDR(r2, ia64_mca_pal_pte)
;;
mov r17=r2
ld8 r18=[r2] // load PAL PTE
;;
ld8 r18=[r17],8 // pte
GET_THIS_PADDR(r2, ia64_mca_pal_base)
;;
ld8 r16=[r17] // vaddr
ld8 r16=[r2] // load PAL vaddr
mov r19=IA64_GRANULE_SHIFT<<2
;;
mov cr.itir=r19
......@@ -308,14 +309,18 @@ err:
done_tlb_purge_and_reload:
// Setup new stack frame for OS_MCA handling
GET_MCA_BSPSTORE(r2) // paddr of bspstore save area
GET_MCA_STACKFRAME(r3);; // paddr of stack frame save area
GET_THIS_PADDR(r2, ia64_mca_data)
;;
add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
;;
rse_switch_context(r6,r3,r2);; // RSC management in this new context
GET_MCA_STACK(r2);; // paddr of stack save area
// stack size must be same as C array
addl r2=8*1024-16,r2;; // stack base @ bottom of array
mov r12=r2 // allow 16 bytes of scratch
// (C calling convention)
GET_THIS_PADDR(r2, ia64_mca_data)
;;
add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
;;
mov r12=r2 // establish new stack-pointer
// Enter virtual mode from physical mode
VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
......@@ -331,7 +336,10 @@ ia64_os_mca_virtual_begin:
ia64_os_mca_virtual_end:
// restore the original stack frame here
GET_MCA_STACKFRAME(r2);; // phys addr of MCA save area
GET_THIS_PADDR(r2, ia64_mca_data)
;;
add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
;;
movl r4=IA64_PSR_MC
;;
rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
......@@ -372,8 +380,10 @@ ia64_os_mca_dispatch_end:
ia64_os_mca_proc_state_dump:
// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
// to virtual addressing mode.
GET_MCA_DUMP_PADDR(r2);; // phys addr of MCA save area
GET_THIS_PADDR(r2, ia64_mca_data)
;;
add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
;;
// save ar.NaT
mov r5=ar.unat // ar.unat
......@@ -603,7 +613,9 @@ end_os_mca_dump:
ia64_os_mca_proc_state_restore:
// Restore bank1 GR16-31
GET_MCA_DUMP_PADDR(r2);; // phys addr of proc state dump area
GET_THIS_PADDR(r2, ia64_mca_data)
;;
add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
restore_GRs: // restore bank-1 GRs 16-31
bsw.1;;
......
......@@ -37,10 +37,10 @@
* go virtual and don't want to destroy the iip or ipsr.
*/
#define MINSTATE_START_SAVE_MIN_PHYS \
(pKStk) mov r3=ar.k3;; \
(pKStk) addl r3=IA64_CPUINFO_PA_MCA_INFO,r3;; \
(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
(pKStk) ld8 r3 = [r3];; \
(pKStk) addl r3=IA64_INIT_STACK,r3;; \
(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
......
......@@ -5,13 +5,13 @@
* The initial version of perfmon.c was written by
* Ganesh Venkitachalam, IBM Corp.
*
* Then it was modified for perfmon-1.x by Stephane Eranian and
* Then it was modified for perfmon-1.x by Stephane Eranian and
* David Mosberger, Hewlett Packard Co.
*
*
* Version Perfmon-2.x is a rewrite of perfmon-1.x
* by Stephane Eranian, Hewlett Packard Co.
* by Stephane Eranian, Hewlett Packard Co.
*
* Copyright (C) 1999-2003 Hewlett Packard Co
* Copyright (C) 1999-2003, 2005 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
......@@ -4778,10 +4778,8 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
* system-call entry point (must return long)
*/
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count, long arg5, long arg6, long arg7,
long arg8, long stack)
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
struct pt_regs *regs = (struct pt_regs *)&stack;
struct file *file = NULL;
pfm_context_t *ctx = NULL;
unsigned long flags = 0UL;
......@@ -4905,7 +4903,7 @@ sys_perfmonctl (int fd, int cmd, void __user *arg, int count, long arg5, long ar
if (unlikely(ret)) goto abort_locked;
skip_fd:
ret = (*func)(ctx, args_k, count, regs);
ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
call_made = 1;
......@@ -6671,8 +6669,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
}
#else /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
long arg8, long stack)
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
return -ENOSYS;
}
......
......@@ -60,7 +60,6 @@
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
unsigned long __per_cpu_mca[NR_CPUS];
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
......@@ -388,7 +387,7 @@ setup_arch (char **cmdline_p)
/* enable IA-64 Machine Check Abort Handling unless disabled */
if (!strstr(saved_command_line, "nomca"))
ia64_mca_init();
platform_setup(cmdline_p);
paging_init();
}
......@@ -602,7 +601,6 @@ void
cpu_init (void)
{
extern void __devinit ia64_mmu_init (void *);
extern void set_mca_pointer (struct cpuinfo_ia64 *, void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
......@@ -611,6 +609,14 @@ cpu_init (void)
cpu_data = per_cpu_init();
/*
* We set ar.k3 so that assembly code in MCA handler can compute
* physical addresses of per cpu variables with a simple:
* phys = ar.k3 + &per_cpu_var
*/
ia64_set_kr(IA64_KR_PER_CPU_DATA,
ia64_tpa(cpu_data) - (long) __per_cpu_start);
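The assembly-side consumer of this is the new GET_THIS_PADDR macro (see the mca_asm.S and minstate.h hunks); in C terms the arithmetic looks roughly like the sketch below (hypothetical helper, not patch code):

	/* With ar.k3 = ia64_tpa(cpu_data) - __per_cpu_start, adding the
	 * link-time address of any per-CPU variable yields its physical
	 * address on this CPU, usable even with address translation off. */
	static unsigned long
	this_cpu_paddr (unsigned long percpu_var)	/* e.g. (unsigned long) &per_cpu__ia64_mca_data */
	{
		return ia64_get_kr(IA64_KR_PER_CPU_DATA) + percpu_var;
	}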
get_max_cacheline_size();
/*
......@@ -657,7 +663,7 @@ cpu_init (void)
BUG();
ia64_mmu_init(ia64_imva(cpu_data));
set_mca_pointer(cpu_info, cpu_data);
ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
ia32_cpu_init();
......
......@@ -84,12 +84,11 @@ ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch
}
asmlinkage long
sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, long arg3, long arg4,
long arg5, long arg6, long arg7, long stack)
sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
long arg3, long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
struct pt_regs *pt = (struct pt_regs *) &stack;
return do_sigaltstack(uss, uoss, pt->r12);
return do_sigaltstack(uss, uoss, regs.r12);
}
static long
......
......@@ -2,7 +2,7 @@
* This file contains various system calls that have different calling
* conventions on different platforms.
*
* Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
* Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
......@@ -163,10 +163,9 @@ ia64_brk (unsigned long brk)
* and r9) as this is faster than doing a copy_to_user().
*/
asmlinkage long
sys_pipe (long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7, long stack)
sys_pipe (void)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
struct pt_regs *regs = ia64_task_regs(current);
int fd[2];
int retval;
......
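The body elided above is what the comment before sys_pipe refers to: fd[1] travels back to userspace in the saved r9 rather than through copy_to_user(). A sketch of that pattern, assuming the usual do_pipe() helper:

	/* Illustrative only: r8 carries the normal return value (fd[0]),
	 * the saved frame's r9 carries the second descriptor. */
	retval = do_pipe(fd);
	if (!retval) {
		regs->r9 = fd[1];
		retval = fd[0];
	}
	return retval;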
......@@ -358,11 +358,10 @@ struct illegal_op_return {
};
struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5,
unsigned long arg6, unsigned long arg7, unsigned long stack)
ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7,
struct pt_regs regs)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
struct illegal_op_return rv;
struct siginfo si;
char buf[128];
......@@ -371,19 +370,19 @@ ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
{
extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);
rv = ia64_emulate_brl(regs, ec);
rv = ia64_emulate_brl(&regs, ec);
if (rv.fkt != (unsigned long) -1)
return rv;
}
#endif
sprintf(buf, "IA-64 Illegal operation fault");
die_if_kernel(buf, regs, 0);
die_if_kernel(buf, &regs, 0);
memset(&si, 0, sizeof(si));
si.si_signo = SIGILL;
si.si_code = ILL_ILLOPC;
si.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
force_sig_info(SIGILL, &si, current);
rv.fkt = 0;
return rv;
......@@ -391,11 +390,10 @@ ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long iim, unsigned long itir, unsigned long arg5,
unsigned long arg6, unsigned long arg7, unsigned long stack)
unsigned long iim, unsigned long itir, long arg5, long arg6,
long arg7, struct pt_regs regs)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long code, error = isr;
unsigned long code, error = isr, iip;
struct siginfo siginfo;
char buf[128];
int result, sig;
......@@ -415,10 +413,12 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
* This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
* the lfetch.
*/
ia64_psr(regs)->ed = 1;
ia64_psr(&regs)->ed = 1;
return;
}
iip = regs.cr_iip + ia64_psr(&regs)->ri;
switch (vector) {
case 24: /* General Exception */
code = (isr >> 4) & 0xf;
......@@ -428,8 +428,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
regs->pr);
current->comm, current->pid,
regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
return;
}
......@@ -437,14 +437,14 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 25: /* Disabled FP-Register */
if (isr & 2) {
disabled_fph_fault(regs);
disabled_fph_fault(&regs);
return;
}
sprintf(buf, "Disabled FPL fault---not supposed to happen!");
break;
case 26: /* NaT Consumption */
if (user_mode(regs)) {
if (user_mode(&regs)) {
void __user *addr;
if (((isr >> 4) & 0xf) == 2) {
......@@ -456,7 +456,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
/* register NaT consumption */
sig = SIGILL;
code = ILL_ILLOPN;
addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
addr = (void __user *) (regs.cr_iip
+ ia64_psr(&regs)->ri);
}
siginfo.si_signo = sig;
siginfo.si_code = code;
......@@ -467,17 +468,17 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
siginfo.si_isr = isr;
force_sig_info(sig, &siginfo, current);
return;
} else if (ia64_done_with_exception(regs))
} else if (ia64_done_with_exception(&regs))
return;
sprintf(buf, "NaT consumption");
break;
case 31: /* Unsupported Data Reference */
if (user_mode(regs)) {
if (user_mode(&regs)) {
siginfo.si_signo = SIGILL;
siginfo.si_code = ILL_ILLOPN;
siginfo.si_errno = 0;
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) iip;
siginfo.si_imm = vector;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
......@@ -490,7 +491,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 29: /* Debug */
case 35: /* Taken Branch Trap */
case 36: /* Single Step Trap */
if (fsys_mode(current, regs)) {
if (fsys_mode(current, &regs)) {
extern char __kernel_syscall_via_break[];
/*
* Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
......@@ -498,13 +499,13 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
*/
if (unlikely(vector == 29)) {
die("Got debug trap in fsys-mode---not supposed to happen!",
regs, 0);
&regs, 0);
return;
}
/* re-do the system call via break 0x100000: */
regs->cr_iip = (unsigned long) __kernel_syscall_via_break;
ia64_psr(regs)->ri = 0;
ia64_psr(regs)->cpl = 3;
regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
ia64_psr(&regs)->ri = 0;
ia64_psr(&regs)->cpl = 3;
return;
}
switch (vector) {
......@@ -515,8 +516,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
* Erratum 10 (IFA may contain incorrect address) now has
* "NoFix" status. There are no plans for fixing this.
*/
if (ia64_psr(regs)->is == 0)
ifa = regs->cr_iip;
if (ia64_psr(&regs)->is == 0)
ifa = regs.cr_iip;
#endif
break;
case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
......@@ -533,12 +534,12 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 32: /* fp fault */
case 33: /* fp trap */
result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
siginfo.si_code = FPE_FLTINV;
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) iip;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
siginfo.si_imm = 0;
......@@ -554,19 +555,18 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
* interesting work (e.g., signal delivery is done in the kernel
* exit path).
*/
ia64_psr(regs)->lp = 0;
ia64_psr(&regs)->lp = 0;
return;
} else {
/* Unimplemented Instr. Address Trap */
if (user_mode(regs)) {
if (user_mode(&regs)) {
siginfo.si_signo = SIGILL;
siginfo.si_code = ILL_BADIADDR;
siginfo.si_errno = 0;
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_imm = 0;
siginfo.si_addr = (void __user *)
(regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) iip;
force_sig_info(SIGILL, &siginfo, current);
return;
}
......@@ -576,23 +576,23 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 45:
#ifdef CONFIG_IA32_SUPPORT
if (ia32_exception(regs, isr) == 0)
if (ia32_exception(&regs, isr) == 0)
return;
#endif
printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
regs->cr_iip, ifa, isr);
iip, ifa, isr);
force_sig(SIGSEGV, current);
break;
case 46:
#ifdef CONFIG_IA32_SUPPORT
if (ia32_intercept(regs, isr) == 0)
if (ia32_intercept(&regs, isr) == 0)
return;
#endif
printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
regs->cr_iip, ifa, isr, iim);
iip, ifa, isr, iim);
force_sig(SIGSEGV, current);
return;
......@@ -604,6 +604,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
sprintf(buf, "Fault %lu", vector);
break;
}
die_if_kernel(buf, regs, error);
die_if_kernel(buf, &regs, error);
force_sig(SIGILL, current);
}
......@@ -178,7 +178,7 @@ find_memory (void)
void *
per_cpu_init (void)
{
void *cpu_data, *mca_data;
void *cpu_data;
int cpu;
/*
......@@ -189,14 +189,11 @@ per_cpu_init (void)
if (smp_processor_id() == 0) {
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
mca_data = alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS);
for (cpu = 0; cpu < NR_CPUS; cpu++) {
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
__per_cpu_mca[cpu] = (unsigned long)__pa(mca_data);
mca_data += PERCPU_MCA_SIZE;
}
}
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
......
......@@ -26,7 +26,6 @@
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/mca.h>
/*
* Track per-node information needed to setup the boot memory allocator, the
......@@ -294,9 +293,6 @@ static int early_nr_cpus_node(int node)
* |------------------------|
* | local ia64_node_data |
* |------------------------|
* | MCA/INIT data * |
* | cpus_on_this_node |
* |------------------------|
* | ??? |
* |________________________|
*
......@@ -310,7 +306,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
{
unsigned long epfn, cpu, cpus, phys_cpus;
unsigned long pernodesize = 0, pernode, pages, mapsize;
void *cpu_data, *mca_data_phys;
void *cpu_data;
struct bootmem_data *bdp = &mem_data[node].bootmem_data;
epfn = (start + len) >> PAGE_SHIFT;
......@@ -339,7 +335,6 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
pernodesize += node * L1_CACHE_BYTES;
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
pernodesize += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
pernodesize = PAGE_ALIGN(pernodesize);
pernode = NODEDATA_ALIGN(start, node);
......@@ -362,9 +357,6 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
mem_data[node].pgdat->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
mca_data_phys = (void *)pernode;
pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
/*
* Copy the static per-cpu data into the region we
* just set aside and then setup __per_cpu_offset
......@@ -374,18 +366,6 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
if (node == node_cpuid[cpu].nid) {
memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end - __per_cpu_start);
if ((cpu == 0) || (node_cpuid[cpu].phys_id > 0)) {
/*
* The memory for the cpuinfo structure is allocated
* here, but the data in the structure is initialized
* later. Save the physical address of the MCA save
* area in __per_cpu_mca[cpu]. When the cpuinfo struct
* is initialized, the value in __per_cpu_mca[cpu]
* will be put in the cpuinfo structure.
*/
__per_cpu_mca[cpu] = __pa(mca_data_phys);
mca_data_phys += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t));
}
__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
__per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
......
......@@ -40,7 +40,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
extern void ia64_tlb_init (void);
extern void efi_get_pal_addr (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
......@@ -292,27 +291,6 @@ setup_gate (void)
ia64_patch_gate();
}
void
set_mca_pointer(struct cpuinfo_ia64 *cpuinfo, void *cpu_data)
{
void *my_cpu_data = ia64_imva(cpu_data);
/*
* The MCA info structure was allocated earlier and a physical address pointer
* saved in __per_cpu_mca[cpu]. Move that pointer into the cpuinfo structure.
*/
cpuinfo->ia64_pa_mca_data = (__u64 *)__per_cpu_mca[smp_processor_id()];
cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(cpuinfo));
/*
* Set pal_base and pal_paddr in cpuinfo structure.
*/
efi_get_pal_addr();
}
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
......
......@@ -71,7 +71,7 @@ pci_sal_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
u64 addr, mode, data = 0;
int result = 0;
if ((seg > 255) || (bus > 255) || (devfn > 255) || (reg > 4095))
if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
if ((seg | reg) <= 255) {
......
......@@ -13,7 +13,7 @@
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
#include "shubio.h"
#include <asm/sn/shubio.h>
#include <asm/nodedata.h>
#include <asm/delay.h>
......
......@@ -10,7 +10,7 @@
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include "shubio.h"
#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
......
......@@ -13,7 +13,7 @@
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include "shubio.h"
#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
......
......@@ -36,9 +36,7 @@
#include <asm/sn/intr.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
/* This is ugly and jbarnes has promised me to fix this later */
#include "../../arch/ia64/sn/include/shubio.h"
#include <asm/sn/shubio.h>
MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("SGI Altix RTC Timer");
......
......@@ -14,7 +14,7 @@
*/
#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
#define IA64_KR_PA_CPU_INFO 3 /* ar.k3: phys addr of this cpu's cpu_info struct */
#define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
......
......@@ -11,6 +11,8 @@
#ifndef _ASM_IA64_MCA_H
#define _ASM_IA64_MCA_H
#define IA64_MCA_STACK_SIZE 8192
#if !defined(__ASSEMBLY__)
#include <linux/interrupt.h>
......@@ -102,21 +104,21 @@ typedef struct ia64_mca_os_to_sal_state_s {
*/
} ia64_mca_os_to_sal_state_t;
#define IA64_MCA_STACK_SIZE 1024
#define IA64_MCA_STACK_SIZE_BYTES (1024 * 8)
#define IA64_MCA_BSPSTORE_SIZE 1024
/* Per-CPU MCA state that is too big for normal per-CPU variables. */
typedef struct ia64_mca_cpu_s {
u64 ia64_mca_stack[IA64_MCA_STACK_SIZE] __attribute__((aligned(16)));
u64 ia64_mca_proc_state_dump[512] __attribute__((aligned(16)));
u64 ia64_mca_stackframe[32] __attribute__((aligned(16)));
u64 ia64_mca_bspstore[IA64_MCA_BSPSTORE_SIZE] __attribute__((aligned(16)));
u64 ia64_init_stack[KERNEL_STACK_SIZE/8] __attribute__((aligned(16)));
} ia64_mca_cpu_t;
struct ia64_mca_cpu {
u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */
u64 proc_state_dump[512];
u64 stackframe[32];
u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */
u64 init_stack[KERNEL_STACK_SIZE/8];
} __attribute__ ((aligned(16)));
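With IA64_MCA_STACK_SIZE now a byte count (8192, defined above the #if !defined(__ASSEMBLY__) guard so mca_asm.S can use it), each IA64_MCA_STACK_SIZE/8 array of u64 spans exactly IA64_MCA_STACK_SIZE bytes. A hypothetical compile-time check of that invariant (not in the patch):

	/* Fails to compile if stack[] stops matching the offset arithmetic
	 * in mca_asm.S (IA64_MCA_CPU_STACK_OFFSET + IA64_MCA_STACK_SIZE - 16). */
	typedef char ia64_mca_stack_size_check[
		(sizeof(((struct ia64_mca_cpu *) 0)->stack) == IA64_MCA_STACK_SIZE) ? 1 : -1];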
#define PERCPU_MCA_SIZE sizeof(ia64_mca_cpu_t)
/* Array of physical addresses of each CPU's MCA area. */
extern unsigned long __per_cpu_mca[NR_CPUS];
extern void ia64_mca_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
extern void ia64_mca_ucmc_handler(void);
......
......@@ -46,40 +46,9 @@
mov temp = 0x7 ;; \
dep addr = temp, addr, 61, 3
/*
* This macro gets the physical address of this cpu's cpuinfo structure.
*/
#define GET_PERCPU_PADDR(reg) \
mov reg = ar.k3;; \
addl reg = IA64_CPUINFO_PERCPU_PADDR,reg
#define GET_CPUINFO_PAL_PADDR(reg) \
mov reg = ar.k3;; \
addl reg = IA64_CPUINFO_PAL_PADDR,reg
/*
* This macro gets the physical address of this cpu's MCA save structure.
*/
#define GET_CPUINFO_MCA_PADDR(reg) \
mov reg = ar.k3;; \
addl reg = IA64_CPUINFO_PA_MCA_INFO,reg;; \
ld8 reg = [reg]
#define GET_MCA_BSPSTORE(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_BSPSTORE,reg
#define GET_MCA_STACKFRAME(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_STACKFRAME,reg
#define GET_MCA_STACK(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_STACK,reg
#define GET_MCA_DUMP_PADDR(reg) \
GET_CPUINFO_MCA_PADDR(reg);; \
addl reg = IA64_MCA_PROC_STATE_DUMP,reg
#define GET_THIS_PADDR(reg, var) \
mov reg = IA64_KR(PER_CPU_DATA);; \
addl reg = THIS_CPU(var), reg
/*
* This macro jumps to the instruction at the given virtual address
......
......@@ -56,8 +56,6 @@ extern void *per_cpu_init(void);
#endif /* SMP */
extern unsigned long __per_cpu_mca[NR_CPUS];
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
......
......@@ -151,12 +151,9 @@ struct cpuinfo_ia64 {
__u64 itc_freq; /* frequency of ITC counter */
__u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */
__u64 percpu_paddr;
__u64 ptce_base;
__u32 ptce_count[2];
__u32 ptce_stride[2];
__u64 pal_paddr;
__u64 pal_base;
struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
#ifdef CONFIG_SMP
......@@ -177,7 +174,6 @@ struct cpuinfo_ia64 {
#ifdef CONFIG_NUMA
struct ia64_node_data *node_data;
#endif
__u64 *ia64_pa_mca_data; /* ptr to MCA/INIT processor state */
};
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
......
......@@ -4,7 +4,7 @@
/*
* IA-64 Linux syscall numbers and inline-functions.
*
* Copyright (C) 1998-2004 Hewlett-Packard Co
* Copyright (C) 1998-2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
......@@ -376,15 +376,13 @@ struct pt_regs;
struct sigaction;
long sys_execve(char __user *filename, char __user * __user *argv,
char __user * __user *envp, struct pt_regs *regs);
asmlinkage long sys_pipe(long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7, long stack);
asmlinkage long sys_pipe(void);
asmlinkage long sys_ptrace(long request, pid_t pid,
unsigned long addr, unsigned long data,
long arg4, long arg5, long arg6, long arg7, long stack);
unsigned long addr, unsigned long data);
asmlinkage long sys_rt_sigaction(int sig,
const struct sigaction __user *act,
struct sigaction __user *oact,
size_t sigsetsize);
const struct sigaction __user *act,
struct sigaction __user *oact,
size_t sigsetsize);
/*
* "Conditional" syscalls
......
......@@ -289,6 +289,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
extern void efi_map_memmap(void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
......