Commit 4af03459 authored by David Mosberger

ia64: Restructure pt_regs and optimize syscall path.

Patch by Rohit Seth, Fenghua Yu, and Arun Sharma:

Please find attached a patch for kernel entry/exit optimization. It is based on the
2.5.69 kernel.

The main items covered by this patch are:
1) Support for 16-byte instructions as per SDM 2.1 (CSD/SSD in pt_regs)
2) f10-f11 are added as additional scratch registers for the kernel's use.
3) Re-arrange pt_regs so the system-call path touches fewer cache lines, and reduce
scratch-register saving/restoring on that path (see the sketch after this list).
4) A few instruction reorderings in low-level code.
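
For orientation, the main effect of item 3 is visible in the pt_regs hunk further down
(presumably include/asm-ia64/ptrace.h): the registers that the system-call entry/exit path
must save and restore are now grouped at the start of the structure, so SAVE_MIN touches
fewer cache lines. A condensed sketch of the new layout, with field comments taken from
the diff and later fields elided:

struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */
	unsigned long b6;	/* scratch */
	unsigned long b7;	/* scratch */
	unsigned long ar_csd;	/* used by cmp8xchg16 (scratch) */
	unsigned long ar_ssd;	/* reserved for future use (scratch) */
	unsigned long r8;	/* scratch (return value register 0) */
	unsigned long r9;	/* scratch (return value register 1) */
	unsigned long r10;	/* scratch (return value register 2) */
	unsigned long r11;	/* scratch (return value register 3) */
	unsigned long cr_ipsr;	/* interrupted task's psr */
	unsigned long cr_iip;	/* interrupted task's instruction pointer */
	/* ... remaining SAVE_MIN state, followed by the SAVE_REST state
	 * (r16-r31, ar.ccv, f6-f9 plus the new f10/f11 scratch registers) ... */
};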
parent cb7b2a3b
......@@ -18,7 +18,7 @@ LDFLAGS_MODULE += -T arch/ia64/module.lds
AFLAGS_KERNEL := -mconstant-gp
EXTRA :=
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
-falign-functions=32 -frename-registers
CFLAGS_KERNEL := -mconstant-gp
......
......@@ -179,8 +179,10 @@ copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
* datasel ar.fdr(32:47)
*
* _st[(0+TOS)%8] f8
* _st[(1+TOS)%8] f9 (f8, f9 from ptregs)
* : : : (f10..f15 from live reg)
* _st[(1+TOS)%8] f9
* _st[(2+TOS)%8] f10
* _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
* : : : (f12..f15 from live reg)
* : : :
* _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
*
......@@ -262,8 +264,8 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
__put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000
/*
* save f8 and f9 from pt_regs
* save f10..f15 from live register set
* save f8..f11 from pt_regs
* save f12..f15 from live register set
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
......@@ -278,11 +280,11 @@ save_ia32_fpstate_live (struct _fpstate_ia32 *save)
copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f9);
copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 10);
ia64f2ia32f(fpregp, &ptp->f10);
copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 11);
ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 13);
......@@ -394,8 +396,8 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
/*
* restore f8, f9 onto pt_regs
* restore f10..f15 onto live registers
* restore f8..f11 onto pt_regs
* restore f12..f15 onto live registers
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
......@@ -411,11 +413,11 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 *save)
ia32f2ia64f(&ptp->f8, fpregp);
copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f9, fpregp);
copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(10, fpregp);
ia32f2ia64f(&ptp->f10, fpregp);
copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(11, fpregp);
ia32f2ia64f(&ptp->f11, fpregp);
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
......@@ -738,11 +740,11 @@ restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 *sc, int *
#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) tmp << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) tmp << 32)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32)
#define copyseg_cs(tmp) (regs->r17 |= tmp)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) tmp << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) tmp << 16)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16)
#define copyseg_ds(tmp) (regs->r16 |= tmp)
#define COPY_SEG(seg) \
......
......@@ -60,30 +60,26 @@ ia32_load_segment_descriptors (struct task_struct *task)
regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
task->thread.csd = load_desc(regs->r17 >> 0); /* CSD */
task->thread.ssd = load_desc(regs->r17 >> 16); /* SSD */
regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
void
ia32_save_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;
unsigned long eflag, fsr, fcr, fir, fdr;
asm ("mov %0=ar.eflag;"
"mov %1=ar.fsr;"
"mov %2=ar.fcr;"
"mov %3=ar.fir;"
"mov %4=ar.fdr;"
"mov %5=ar.csd;"
"mov %6=ar.ssd;"
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr), "=r"(csd), "=r"(ssd));
: "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
t->thread.eflag = eflag;
t->thread.fsr = fsr;
t->thread.fcr = fcr;
t->thread.fir = fir;
t->thread.fdr = fdr;
t->thread.csd = csd;
t->thread.ssd = ssd;
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
......@@ -91,7 +87,7 @@ ia32_save_state (struct task_struct *t)
void
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = ia64_task_regs(t);
int nr = get_cpu(); /* LDT and TSS depend on CPU number: */
......@@ -100,8 +96,6 @@ ia32_load_state (struct task_struct *t)
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
csd = t->thread.csd;
ssd = t->thread.ssd;
tssd = load_desc(_TSS(nr)); /* TSSD */
asm volatile ("mov ar.eflag=%0;"
......@@ -109,9 +103,7 @@ ia32_load_state (struct task_struct *t)
"mov ar.fcr=%2;"
"mov ar.fir=%3;"
"mov ar.fdr=%4;"
"mov ar.csd=%5;"
"mov ar.ssd=%6;"
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr), "r"(csd), "r"(ssd));
:: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
......
......@@ -1798,12 +1798,16 @@ put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
ia64f2ia32f(f, &ptp->f9);
break;
case 2:
ia64f2ia32f(f, &ptp->f10);
break;
case 3:
ia64f2ia32f(f, &ptp->f11);
break;
case 4:
case 5:
case 6:
case 7:
ia64f2ia32f(f, &swp->f10 + (regno - 2));
ia64f2ia32f(f, &swp->f12 + (regno - 4));
break;
}
copy_to_user(reg, f, sizeof(*reg));
......@@ -1824,12 +1828,16 @@ get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switc
copy_from_user(&ptp->f9, reg, sizeof(*reg));
break;
case 2:
copy_from_user(&ptp->f10, reg, sizeof(*reg));
break;
case 3:
copy_from_user(&ptp->f11, reg, sizeof(*reg));
break;
case 4:
case 5:
case 6:
case 7:
copy_from_user(&swp->f10 + (regno - 2), reg, sizeof(*reg));
copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
break;
}
return;
......
......@@ -4,6 +4,7 @@
* Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these!
*/
#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0*/
#define pKStk p2 /* will leave_kernel return to kernel-stacks? */
#define pUStk p3 /* will leave_kernel return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
......
......@@ -18,7 +18,7 @@
#define rARRNAT r24
#define rARBSPSTORE r23
#define rKRBS r22
#define rB6 r21
#define rB0 r21
#define rR1 r20
/*
......@@ -110,20 +110,21 @@
* we can pass interruption state as arguments to a handler.
*/
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov rARRSC=ar.rsc; /* M */ \
mov rARUNAT=ar.unat; /* M */ \
mov rR1=r1; /* A */ \
MINSTATE_GET_CURRENT(r1); /* M (or M;;I) */ \
mov rARUNAT=ar.unat; /* M */ \
mov rCRIPSR=cr.ipsr; /* M */ \
mov rARPFS=ar.pfs; /* I */ \
mov rCRIIP=cr.iip; /* M */ \
mov rB6=b6; /* I */ /* rB6 = branch reg 6 */ \
mov r21=ar.fpsr; /* M */ \
COVER; /* B;; (or nothing) */ \
;; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
......@@ -131,69 +132,73 @@
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */ \
;; \
MINSTATE_START_SAVE_MIN \
add r17=L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ \
;; \
st8 [r1]=rCRIPSR; /* save cr.ipsr */ \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
add r16=16,r1; /* initialize first base pointer */ \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=rCRIPSR; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
adds r17=8,r1; /* initialize second base pointer */ \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT; \
mov rCRIPSR=b0 \
;; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
st8 [r17]=rCRIIP,16; /* save cr.iip */ \
st8 [r16]=rCRIFS,16; /* save cr.ifs */ \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
st8 [r16]=rCRIIP,16; /* save cr.iip */ \
st8 [r17]=rCRIFS,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r17]=rARUNAT,16; /* save ar.unat */ \
st8 [r16]=rARPFS,16; /* save ar.pfs */ \
st8 [r16]=rARUNAT,16; /* save ar.unat */ \
st8 [r17]=rARPFS,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r17]=rARRSC,16; /* save ar.rsc */ \
(pUStk) st8 [r16]=rARRNAT,16; /* save ar.rnat */ \
(pKStk) adds r16=16,r16; /* skip over ar_rnat field */ \
st8 [r16]=rARRSC,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=rARRNAT,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */ \
st8 [r16]=rARPR,16; /* save predicates */ \
(pKStk) adds r17=16,r17; /* skip over ar_bspstore field */ \
;; \
st8 [r17]=rB6,16; /* save b6 */ \
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
;; \
.mem.offset 8,0; st8.spill [r17]=rR1,16; /* save original r1 */ \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
;; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
.mem.offset 0,0; st8.spill [r16]=r12,16; \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
(pUStk) st8 [r16]=rARBSPSTORE,16; /* save ar.bspstore */ \
st8 [r17]=rARPR,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
.mem.offset 8,0; st8.spill [r17]=r13,16; \
.mem.offset 0,0; st8.spill [r16]=r14,16; \
st8 [r16]=rCRIPSR,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 8,0; st8.spill [r17]=r15,16; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
dep r14=-1,r0,61,3; \
;; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
.mem.offset 0,0; st8.spill [r16]=r10,16; \
.mem.offset 0,0; st8.spill [r16]=rR1,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 8,0; st8.spill [r17]=r11,16; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
dep r14=-1,r0,61,3; \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
MINSTATE_END_SAVE_MIN
/*
* SAVE_REST saves the remainder of pt_regs (with psr.ic on). This
* macro guarantees to preserve all predicate registers, r8, r9, r10,
* r11, r14, and r15.
* SAVE_REST saves the remainder of pt_regs (with psr.ic on).
*
* Assumed state upon entry:
* psr.ic: on
......@@ -202,49 +207,52 @@
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
;; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
;; \
mov r16=ar.ccv; /* M-unit */ \
movl r18=FPSR_DEFAULT /* L-unit */ \
;; \
mov r17=ar.fpsr; /* M-unit */ \
mov ar.fpsr=r18; /* M-unit */ \
;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
mov r18=b0; \
mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,16; \
st8 [r2]=r16,16; /* ar.ccv */ \
;; \
st8 [r3]=r17,16; /* ar.fpsr */ \
st8 [r2]=r18,16; /* b0 */ \
;; \
st8 [r3]=r19,16+8; /* b7 */ \
;; \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar_ccv */ \
adds r3=16,r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
;; \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32
stf.spill [r3]=f9,32; \
adds r24=PT(B6)+16,r12 \
;; \
stf.spill [r2]=f10,32; \
stf.spill [r3]=f11,32; \
adds r25=PT(B7)+16,r12 \
;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
st8 [r25]=r10; /* ar.ssd */ \
;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
......
......@@ -102,6 +102,7 @@ show_regs (struct pt_regs *regs)
regs->ar_rnat, regs->ar_bspstore, regs->pr);
printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
regs->f6.u.bits[1], regs->f6.u.bits[0],
......@@ -109,6 +110,9 @@ show_regs (struct pt_regs *regs)
printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
regs->f8.u.bits[1], regs->f8.u.bits[0],
regs->f9.u.bits[1], regs->f9.u.bits[0]);
printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
regs->f10.u.bits[1], regs->f10.u.bits[0],
regs->f11.u.bits[1], regs->f11.u.bits[0]);
printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
......@@ -471,6 +475,8 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *
dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
unw_get_ar(info, UNW_AR_LC, &dst[53]);
unw_get_ar(info, UNW_AR_EC, &dst[54]);
unw_get_ar(info, UNW_AR_CSD, &dst[55]);
unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
void
......
......@@ -29,6 +29,8 @@
#include <asm/perfmon.h>
#endif
#define offsetof(type,field) ((unsigned long) &((type *) 0)->field)
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
......@@ -669,9 +671,12 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
else
ia64_flush_fph(child);
ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
} else if (addr >= PT_F10 && addr < PT_F15 + 16) {
} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
/* scratch registers untouched by kernel (saved in pt_regs) */
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f10) + addr - PT_F10);
} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
/* scratch registers untouched by kernel (saved in switch_stack) */
ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
ptr = (unsigned long *) ((long) sw + (addr - PT_NAT_BITS - 32));
} else if (addr < PT_AR_LC + 8) {
/* preserved state: */
unsigned long nat_bits, scratch_unat, dummy = 0;
......@@ -807,22 +812,69 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
else
return ia64_peek(child, sw, urbs_end, rnat_addr, data);
case PT_R1: case PT_R2: case PT_R3:
case PT_R1:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r1));
break;
case PT_R2: case PT_R3:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r2) + addr - PT_R2);
break;
case PT_R8: case PT_R9: case PT_R10: case PT_R11:
case PT_R12: case PT_R13: case PT_R14: case PT_R15:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r8)+ addr - PT_R8);
break;
case PT_R12: case PT_R13:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r12)+ addr - PT_R12);
break;
case PT_R14:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r14));
break;
case PT_R15:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r15));
break;
case PT_R16: case PT_R17: case PT_R18: case PT_R19:
case PT_R20: case PT_R21: case PT_R22: case PT_R23:
case PT_R24: case PT_R25: case PT_R26: case PT_R27:
case PT_R28: case PT_R29: case PT_R30: case PT_R31:
case PT_B0: case PT_B6: case PT_B7:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r16) + addr - PT_R16);
break;
case PT_B0:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b0));
break;
case PT_B6:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b6));
break;
case PT_B7:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b7));
break;
case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f6) + addr - PT_F6);
break;
case PT_AR_BSPSTORE:
case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
/* scratch register */
ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_bspstore));
break;
case PT_AR_RSC:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_rsc));
break;
case PT_AR_UNAT:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_unat));
break;
case PT_AR_PFS:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_pfs));
break;
case PT_AR_CCV:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_ccv));
break;
case PT_AR_FPSR:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_fpsr));
break;
case PT_CR_IIP:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, cr_iip));
break;
case PT_PR:
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, pr));
break;
/* scratch register */
default:
/* disallow accessing anything else... */
......@@ -830,6 +882,8 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
addr);
return -1;
}
} else if (addr <= PT_AR_SSD) {
ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_csd) + addr - PT_AR_CSD);
} else {
/* access debug registers */
......@@ -936,7 +990,8 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr1-gr3 */
retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long) * 3);
retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
/* gr4-gr7 */
......@@ -950,7 +1005,9 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr12-gr15 */
retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 4);
retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
/* gr16-gr31 */
......@@ -978,13 +1035,13 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
}
/* fr6-fr9 */
/* fr6-fr11 */
retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 4);
retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 6);
/* fp scratch regs(10-15) */
/* fp scratch regs(12-15) */
retval |= __copy_to_user(&ppr->fr[10], &sw->f10, sizeof(struct ia64_fpreg) * 6);
retval |= __copy_to_user(&ppr->fr[12], &sw->f12, sizeof(struct ia64_fpreg) * 4);
/* fr16-fr31 */
......@@ -1061,7 +1118,8 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr1-gr3 */
retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long) * 3);
retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
/* gr4-gr7 */
......@@ -1079,7 +1137,9 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
/* gr12-gr15 */
retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 4);
retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
/* gr16-gr31 */
......@@ -1107,13 +1167,13 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
}
/* fr6-fr9 */
/* fr6-fr11 */
retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 4);
retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 6);
/* fp scratch regs(10-15) */
/* fp scratch regs(12-15) */
retval |= __copy_from_user(&sw->f10, &ppr->fr[10], sizeof(ppr->fr[10]) * 6);
retval |= __copy_from_user(&sw->f12, &ppr->fr[12], sizeof(ppr->fr[12]) * 4);
/* fr16-fr31 */
......
......@@ -115,18 +115,28 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
err |= __get_user(cfm, &sc->sc_cfm);
err |= __get_user(um, &sc->sc_um); /* user mask */
err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 3*8); /* r1-r3 */
err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */
err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 4*8); /* r12-r15 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
if ((flags & IA64_SC_FLAG_IN_SYSCALL)==0)
{
/* Restore most scratch-state only when not in syscall. */
err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __get_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __copy_from_user(&scr->pt.r14, &sc->sc_gr[14], 8); /* r14 */
err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
}
scr->pt.cr_ifs = cfm | (1UL << 63);
......@@ -358,21 +368,42 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
err |= __put_user(cfm, &sc->sc_cfm);
err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */
err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */
err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 3*8); /* r1-r3 */
err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */
err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 4*8); /* r12-r15 */
err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */
err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */
err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
if (flags & IA64_SC_FLAG_IN_SYSCALL)
{
/* Clear scratch registers if the signal interrupted a system call. */
err |= __clear_user(&sc->sc_ar_ccv, 8);
err |= __clear_user(&sc->sc_ar25,8); /* ar.csd */
err |= __clear_user(&sc->sc_ar26,8); /* ar.ssd */
err |= __clear_user(&sc->sc_br[6],8); /* b6 */
err |= __clear_user(&sc->sc_br[7],8); /* b7 */
err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */
err |= __clear_user(&sc->sc_gr[14],8); /* r14 */
err |= __clear_user(&sc->sc_gr[16],16*8); /* r16-r31 */
} else
{
/* Copy scratch registers to sigcontext if the signal did not interrupt a syscall. */
err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_csd, &sc->sc_ar25); /* ar.csd */
err |= __put_user(scr->pt.ar_ssd, &sc->sc_ar26); /* ar.ssd */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
err |= __copy_to_user(&sc->sc_gr[14], &scr->pt.r14, 8); /* r14 */
err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
}
return err;
}
......
......@@ -275,7 +275,6 @@ static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
struct pt_regs *regs)
{
struct ia64_fpreg f6_11[6];
fp_state_t fp_state;
fpswa_ret_t ret;
......@@ -290,11 +289,8 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
* pointer to point to these registers.
*/
fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
f6_11[0] = regs->f6; f6_11[1] = regs->f7;
f6_11[2] = regs->f8; f6_11[3] = regs->f9;
__asm__ ("stf.spill %0=f10%P0" : "=m"(f6_11[4]));
__asm__ ("stf.spill %0=f11%P0" : "=m"(f6_11[5]));
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) f6_11;
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
/*
* unsigned long (*EFI_FPSWA) (
* unsigned long trap_type,
......@@ -310,10 +306,7 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
(unsigned long *) ipsr, (unsigned long *) fpsr,
(unsigned long *) isr, (unsigned long *) pr,
(unsigned long *) ifs, &fp_state);
regs->f6 = f6_11[0]; regs->f7 = f6_11[1];
regs->f8 = f6_11[2]; regs->f9 = f6_11[3];
__asm__ ("ldf.fill f10=%0%P0" :: "m"(f6_11[4]));
__asm__ ("ldf.fill f11=%0%P0" :: "m"(f6_11[5]));
return ret.status;
}
......
......@@ -218,8 +218,9 @@ static u16 fr_info[32]={
RSW(f2), RSW(f3), RSW(f4), RSW(f5),
RPT(f6), RPT(f7), RPT(f8), RPT(f9),
RPT(f10), RPT(f11),
RSW(f10), RSW(f11), RSW(f12), RSW(f13), RSW(f14),
RSW(f12), RSW(f13), RSW(f14),
RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
......
/*
* Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
* - Change pt_regs_off() to make it less dependent on pt_regs structure.
*/
/*
* This file implements call frame unwind support for the Linux
......@@ -209,7 +211,10 @@ static struct {
#endif
};
#define OFF_CASE(reg, reg_num) \
case reg: \
off = struct_offset(struct pt_regs, reg_num); \
break;
/* Unwind accessors. */
/*
......@@ -220,16 +225,39 @@ pt_regs_off (unsigned long reg)
{
unsigned long off =0;
if (reg >= 1 && reg <= 3)
off = struct_offset(struct pt_regs, r1) + 8*(reg - 1);
else if (reg <= 11)
off = struct_offset(struct pt_regs, r8) + 8*(reg - 8);
else if (reg <= 15)
off = struct_offset(struct pt_regs, r12) + 8*(reg - 12);
else if (reg <= 31)
off = struct_offset(struct pt_regs, r16) + 8*(reg - 16);
else
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
switch (reg)
{
OFF_CASE(1,r1)
OFF_CASE(2,r2)
OFF_CASE(3,r3)
OFF_CASE(8,r8)
OFF_CASE(9,r9)
OFF_CASE(10,r10)
OFF_CASE(11,r11)
OFF_CASE(12,r12)
OFF_CASE(13,r13)
OFF_CASE(14,r14)
OFF_CASE(15,r15)
OFF_CASE(16,r16)
OFF_CASE(17,r17)
OFF_CASE(18,r18)
OFF_CASE(19,r19)
OFF_CASE(20,r20)
OFF_CASE(21,r21)
OFF_CASE(22,r22)
OFF_CASE(23,r23)
OFF_CASE(24,r24)
OFF_CASE(25,r25)
OFF_CASE(26,r26)
OFF_CASE(27,r27)
OFF_CASE(28,r28)
OFF_CASE(29,r29)
OFF_CASE(30,r30)
OFF_CASE(31,r31)
default:
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
break;
}
return off;
}
......@@ -419,10 +447,10 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
if (!addr)
addr = &info->sw->f2 + (regnum - 2);
} else if (regnum <= 15) {
if (regnum <= 9)
if (regnum <= 11)
addr = &pt->f6 + (regnum - 6);
else
addr = &info->sw->f10 + (regnum - 10);
addr = &info->sw->f12 + (regnum - 12);
} else if (regnum <= 31) {
addr = info->fr_loc[regnum - 16];
if (!addr)
......@@ -512,6 +540,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
addr = &pt->ar_ccv;
break;
case UNW_AR_CSD:
addr = &pt->ar_csd;
break;
case UNW_AR_SSD:
addr = &pt->ar_ssd;
break;
default:
UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
__FUNCTION__, regnum);
......@@ -1379,7 +1415,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval <= 9)
if (rval <= 11)
val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
else
UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
......
......@@ -37,57 +37,44 @@
#define NAME PASTE(PASTE(__,SGN),PASTE(OP,di3))
GLOBAL_ENTRY(NAME)
.prologue
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f8 = in0
setf.sig f9 = in1
;;
.fframe 16
.save.f 0x20
stf.spill [sp] = f17,-16
// Convert the inputs to FP, to avoid FP software-assist faults.
INT_TO_FP(f8, f8)
;;
.save.f 0x10
stf.spill [sp] = f16
.body
INT_TO_FP(f9, f9)
;;
frcpa.s1 f17, p6 = f8, f9 // y0 = frcpa(b)
frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b)
;;
(p6) fmpy.s1 f7 = f8, f17 // q0 = a*y0
(p6) fnma.s1 f6 = f9, f17, f1 // e0 = -b*y0 + 1
(p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0
(p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1
;;
(p6) fma.s1 f16 = f7, f6, f7 // q1 = q0*e0 + q0
(p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0
(p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0
;;
#ifdef MODULO
sub in1 = r0, in1 // in1 = -b
#endif
(p6) fma.s1 f16 = f16, f7, f16 // q2 = q1*e1 + q1
(p6) fma.s1 f6 = f17, f6, f17 // y1 = y0*e0 + y0
(p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1
(p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0
;;
(p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1
(p6) fnma.s1 f7 = f9, f16, f8 // r = -b*q2 + a
(p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a
;;
#ifdef MODULO
setf.sig f8 = in0 // f8 = a
setf.sig f9 = in1 // f9 = -b
#endif
(p6) fma.s1 f17 = f7, f6, f16 // q3 = r*y2 + q2
(p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2
;;
.restore sp
ldf.fill f16 = [sp], 16
FP_TO_INT(f17, f17) // q = trunc(q3)
FP_TO_INT(f11, f11) // q = trunc(q3)
;;
#ifdef MODULO
xma.l f17 = f17, f9, f8 // r = q*(-b) + a
xma.l f11 = f11, f9, f8 // r = q*(-b) + a
;;
#endif
getf.sig r8 = f17 // transfer result to result register
ldf.fill f17 = [sp]
getf.sig r8 = f11 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
......@@ -3,6 +3,8 @@
*
* Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2002-2003 Intel Co
* Fenghua Yu <fenghua.yu@intel.com>
*
* Note that this file has dual use: when building the kernel
* natively, the file is translated into a binary and executed. When
......@@ -59,6 +61,14 @@ tab[] =
{ "IA64_TASK_TGID_OFFSET", offsetof (struct task_struct, tgid) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
{ "IA64_TASK_THREAD_ON_USTACK_OFFSET", offsetof (struct task_struct, thread.on_ustack) },
{ "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) },
{ "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) },
{ "IA64_PT_REGS_AR_CSD_OFFSET", offsetof (struct pt_regs, ar_csd) },
{ "IA64_PT_REGS_AR_SSD_OFFSET", offsetof (struct pt_regs, ar_ssd) },
{ "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) },
{ "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) },
{ "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) },
{ "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) },
{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
{ "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
{ "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
......@@ -68,19 +78,16 @@ tab[] =
{ "IA64_PT_REGS_AR_RNAT_OFFSET", offsetof (struct pt_regs, ar_rnat) },
{ "IA64_PT_REGS_AR_BSPSTORE_OFFSET",offsetof (struct pt_regs, ar_bspstore) },
{ "IA64_PT_REGS_PR_OFFSET", offsetof (struct pt_regs, pr) },
{ "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) },
{ "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) },
{ "IA64_PT_REGS_LOADRS_OFFSET", offsetof (struct pt_regs, loadrs) },
{ "IA64_PT_REGS_R1_OFFSET", offsetof (struct pt_regs, r1) },
{ "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) },
{ "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) },
{ "IA64_PT_REGS_R12_OFFSET", offsetof (struct pt_regs, r12) },
{ "IA64_PT_REGS_R13_OFFSET", offsetof (struct pt_regs, r13) },
{ "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) },
{ "IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) },
{ "IA64_PT_REGS_R15_OFFSET", offsetof (struct pt_regs, r15) },
{ "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) },
{ "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) },
{ "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) },
{ "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) },
{ "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) },
{ "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) },
{ "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) },
{ "IA64_PT_REGS_R16_OFFSET", offsetof (struct pt_regs, r16) },
{ "IA64_PT_REGS_R17_OFFSET", offsetof (struct pt_regs, r17) },
{ "IA64_PT_REGS_R18_OFFSET", offsetof (struct pt_regs, r18) },
......@@ -98,21 +105,18 @@ tab[] =
{ "IA64_PT_REGS_R30_OFFSET", offsetof (struct pt_regs, r30) },
{ "IA64_PT_REGS_R31_OFFSET", offsetof (struct pt_regs, r31) },
{ "IA64_PT_REGS_AR_CCV_OFFSET", offsetof (struct pt_regs, ar_ccv) },
{ "IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) },
{ "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) },
{ "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) },
{ "IA64_PT_REGS_F6_OFFSET", offsetof (struct pt_regs, f6) },
{ "IA64_PT_REGS_F7_OFFSET", offsetof (struct pt_regs, f7) },
{ "IA64_PT_REGS_F8_OFFSET", offsetof (struct pt_regs, f8) },
{ "IA64_PT_REGS_F9_OFFSET", offsetof (struct pt_regs, f9) },
{ "IA64_PT_REGS_F10_OFFSET", offsetof (struct pt_regs, f10) },
{ "IA64_PT_REGS_F11_OFFSET", offsetof (struct pt_regs, f11) },
{ "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) },
{ "IA64_SWITCH_STACK_AR_FPSR_OFFSET", offsetof (struct switch_stack, ar_fpsr) },
{ "IA64_SWITCH_STACK_F2_OFFSET", offsetof (struct switch_stack, f2) },
{ "IA64_SWITCH_STACK_F3_OFFSET", offsetof (struct switch_stack, f3) },
{ "IA64_SWITCH_STACK_F4_OFFSET", offsetof (struct switch_stack, f4) },
{ "IA64_SWITCH_STACK_F5_OFFSET", offsetof (struct switch_stack, f5) },
{ "IA64_SWITCH_STACK_F10_OFFSET", offsetof (struct switch_stack, f10) },
{ "IA64_SWITCH_STACK_F11_OFFSET", offsetof (struct switch_stack, f11) },
{ "IA64_SWITCH_STACK_F12_OFFSET", offsetof (struct switch_stack, f12) },
{ "IA64_SWITCH_STACK_F13_OFFSET", offsetof (struct switch_stack, f13) },
{ "IA64_SWITCH_STACK_F14_OFFSET", offsetof (struct switch_stack, f14) },
......
......@@ -147,7 +147,7 @@ extern void ia64_init_addr_space (void);
* b0-b7
* ip cfm psr
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
......
......@@ -243,8 +243,6 @@ struct thread_struct {
__u64 fcr; /* IA32 floating pt control reg */
__u64 fir; /* IA32 fp except. instr. reg */
__u64 fdr; /* IA32 fp except. data reg */
__u64 csd; /* IA32 code selector descriptor */
__u64 ssd; /* IA32 stack selector descriptor */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
# define INIT_THREAD_IA32 .eflag = 0, \
......@@ -252,8 +250,6 @@ struct thread_struct {
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
.csd = 0, \
.ssd = 0, \
.old_k1 = 0, \
.old_iob = 0,
#else
......@@ -328,11 +324,15 @@ struct thread_struct {
regs->r24 = 0; regs->r25 = 0; regs->r26 = 0; regs->r27 = 0; \
regs->r28 = 0; regs->r29 = 0; regs->r30 = 0; regs->r31 = 0; \
regs->ar_ccv = 0; \
regs->ar_csd = 0; \
regs->ar_ssd = 0; \
regs->b0 = 0; regs->b7 = 0; \
regs->f6.u.bits[0] = 0; regs->f6.u.bits[1] = 0; \
regs->f7.u.bits[0] = 0; regs->f7.u.bits[1] = 0; \
regs->f8.u.bits[0] = 0; regs->f8.u.bits[1] = 0; \
regs->f9.u.bits[0] = 0; regs->f9.u.bits[1] = 0; \
regs->f10.u.bits[0] = 0; regs->f10.u.bits[1] = 0; \
regs->f11.u.bits[0] = 0; regs->f11.u.bits[1] = 0; \
} \
} while (0)
......
......@@ -5,6 +5,10 @@
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2003 Intel Co
* Suresh Siddha <suresh.b.siddha@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
* Arun Sharma <arun.sharma@intel.com>
*
* 12/07/98 S. Eranian added pt_regs & switch_stack
* 12/21/98 D. Mosberger updated to match latest code
......@@ -93,6 +97,16 @@
*/
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
unsigned long b7; /* scratch */
unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
unsigned long ar_ssd; /* reserved for future use (scratch) */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long cr_ipsr; /* interrupted task's psr */
unsigned long cr_iip; /* interrupted task's instruction pointer */
......@@ -106,22 +120,19 @@ struct pt_regs {
unsigned long ar_bspstore; /* RSE bspstore */
unsigned long pr; /* 64 predicate registers (1 bit each) */
unsigned long b6; /* scratch */
unsigned long b0; /* return pointer (bp) */
unsigned long loadrs; /* size of dirty partition << 16 */
unsigned long r1; /* the gp pointer */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
unsigned long r12; /* interrupted task's memory stack pointer */
unsigned long r13; /* thread pointer */
unsigned long r14; /* scratch */
unsigned long r15; /* scratch */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
......@@ -142,10 +153,7 @@ struct pt_regs {
unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long b0; /* return pointer (bp) */
unsigned long b7; /* scratch */
/*
* Floating point registers that the kernel considers
* scratch:
......@@ -154,6 +162,8 @@ struct pt_regs {
struct ia64_fpreg f7; /* scratch */
struct ia64_fpreg f8; /* scratch */
struct ia64_fpreg f9; /* scratch */
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
/*
......@@ -170,8 +180,6 @@ struct switch_stack {
struct ia64_fpreg f4; /* preserved */
struct ia64_fpreg f5; /* preserved */
struct ia64_fpreg f10; /* scratch, but untouched by kernel */
struct ia64_fpreg f11; /* scratch, but untouched by kernel */
struct ia64_fpreg f12; /* scratch, but untouched by kernel */
struct ia64_fpreg f13; /* scratch, but untouched by kernel */
struct ia64_fpreg f14; /* scratch, but untouched by kernel */
......
......@@ -11,9 +11,64 @@
*
* struct uarea {
* struct ia64_fpreg fph[96]; // f32-f127
* struct switch_stack sw;
* struct pt_regs pt;
* unsigned long rsvd1[712];
* unsigned long nat_bits;
* unsigned long empty1;
* struct ia64_fpreg f2; // f2-f5
* .
* .
* struct ia64_fpreg f5;
* struct ia64_fpreg f10; // f10-f31
* .
* .
* struct ia64_fpreg f31;
* unsigned long r4; // r4-r7
* .
* .
* unsigned long r7;
* unsigned long b1; // b1-b5
* .
* .
* unsigned long b5;
* unsigned long ar_ec;
* unsigned long ar_lc;
* unsigned long empty2[5];
* unsigned long cr_ipsr;
* unsigned long cr_iip;
* unsigned long cfm;
* unsigned long ar_unat;
* unsigned long ar_pfs;
* unsigned long ar_rsc;
* unsigned long ar_rnat;
* unsigned long ar_bspstore;
* unsigned long pr;
* unsigned long b6;
* unsigned long ar_bsp;
* unsigned long r1;
* unsigned long r2;
* unsigned long r3;
* unsigned long r12;
* unsigned long r13;
* unsigned long r14;
* unsigned long r15;
* unsigned long r8;
* unsigned long r9;
* unsigned long r10;
* unsigned long r11;
* unsigned long r16;
* .
* .
* unsigned long r31;
* unsigned long ar_ccv;
* unsigned long ar_fpsr;
* unsigned long b0;
* unsigned long b7;
* unsigned long f6;
* unsigned long f7;
* unsigned long f8;
* unsigned long f9;
* unsigned long ar_csd;
* unsigned long ar_ssd;
* unsigned long rsvd1[710];
* unsigned long dbr[8];
* unsigned long rsvd2[504];
* unsigned long ibr[8];
......@@ -119,7 +174,7 @@
#define PT_F125 0x05d0
#define PT_F126 0x05e0
#define PT_F127 0x05f0
/* switch stack: */
#define PT_NAT_BITS 0x0600
#define PT_F2 0x0610
......@@ -162,7 +217,6 @@
#define PT_AR_EC 0x0800
#define PT_AR_LC 0x0808
/* pt_regs */
#define PT_CR_IPSR 0x0830
#define PT_CR_IIP 0x0838
#define PT_CFM 0x0840
......@@ -209,6 +263,8 @@
#define PT_F7 0x0990
#define PT_F8 0x09a0
#define PT_F9 0x09b0
#define PT_AR_CSD 0x09c0
#define PT_AR_SSD 0x09c8
#define PT_DBR 0x2000 /* data breakpoint registers */
#define PT_IBR 0x3000 /* instruction breakpoint registers */
......
......@@ -56,7 +56,7 @@ struct sigcontext {
unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
unsigned long sc_loadrs; /* see description above */
unsigned long sc_ar25; /* rsvd for scratch use */
unsigned long sc_ar25; /* cmp8xchg16 uses this */
unsigned long sc_ar26; /* rsvd for scratch use */
unsigned long sc_rsvd[12]; /* reserved for future use */
/*
......
......@@ -26,7 +26,9 @@ enum unw_application_register {
UNW_AR_EC,
UNW_AR_FPSR,
UNW_AR_RSC,
UNW_AR_CCV
UNW_AR_CCV,
UNW_AR_CSD,
UNW_AR_SSD
};
/*
......