Commit 4e416e98 authored by Paul Mackerras

PPC32: This changeset adds preemptible kernel support for ppc32

It also streamlines the exception entry/exit code by not saving
all the GPRs on the common exceptions (system call, external
interrupt and decrementer).
parent bb3ffc11
@@ -165,7 +165,9 @@ bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_SMP" = "y" ]; then
bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
fi
define_bool CONFIG_PREEMPT n
if [ "$CONFIG_SMP" != "y" ]; then
bool 'Preemptible Kernel' CONFIG_PREEMPT
fi
if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
bool 'AltiVec Support' CONFIG_ALTIVEC
......
@@ -200,6 +200,8 @@ fix_alignment(struct pt_regs *regs)
unsigned char v[8];
} data;
CHECK_FULL_REGS(regs);
#if defined(CONFIG_4xx)
/* The 4xx-family processors have no DSISR register,
* so we emulate it.
......
@@ -46,45 +46,42 @@
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception, turning
* on address translation.
* Note that we rely on the caller having set cr0.eq iff the exception
* occurred in kernel mode (i.e. MSR:PR = 0).
*/
.globl transfer_to_handler_full
transfer_to_handler_full:
SAVE_NVGPRS(r11)
/* fall through */
.globl transfer_to_handler
transfer_to_handler:
stw r22,_NIP(r21)
stw r23,_MSR(r21)
SAVE_4GPRS(8, r21)
SAVE_8GPRS(12, r21)
SAVE_8GPRS(24, r21)
andi. r23,r23,MSR_PR
mfspr r23,SPRG3
addi r2,r23,-THREAD /* set r2 to current */
tovirt(r2,r2)
stw r2,GPR2(r11)
stw r12,_NIP(r11)
stw r9,_MSR(r11)
mfctr r12
mfspr r2,XER
stw r12,_CTR(r11)
stw r2,_XER(r11)
mfspr r12,SPRG3
addi r2,r12,-THREAD
tovirt(r2,r2) /* set r2 to current */
beq 2f /* if from user, fix up THREAD.regs */
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */
stw r22,THREAD_VRSAVE(r23)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
addi r11,r1,STACK_FRAME_OVERHEAD
stw r11,PT_REGS(r12)
b 3f
2: /* if from kernel, check for stack overflow */
lwz r22,THREAD_INFO-THREAD(r23)
cmplw r1,r22 /* if r1 <= current->thread_info */
lwz r11,THREAD_INFO-THREAD(r12)
cmplw r1,r11 /* if r1 <= current->thread_info */
ble- stack_ovf /* then the kernel stack overflowed */
3:
mflr r23
andi. r24,r23,0x3f00 /* get vector offset */
stw r24,TRAP(r21)
li r22,0
stw r22,RESULT(r21)
mtspr SPRG2,r22 /* r1 is now kernel sp */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
mtlr r23
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
FIX_SRR1(r10,r12)
mtspr SRR0,r11
mtspr SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
@@ -93,108 +90,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* and call StackOverflow(regs), which should not return.
*/
stack_ovf:
SAVE_NVGPRS(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
tovirt(r2,r2) /* set r2 to current */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
lis r9,StackOverflow@ha
addi r9,r9,StackOverflow@l
li r10,MSR_KERNEL
FIX_SRR1(r10,r12)
mtspr SRR0,r9
mtspr SRR1,r10
SYNC
RFI
#endif /* CONFIG_PPC_ISERIES */
#ifdef SHOW_SYSCALLS_TASK
.data
show_syscalls_task:
.long -1
#endif
/*
* Handle a system call.
*/
.text
.stabs "arch/ppc/kernel/",N_SO,0,0,0f
.stabs "entry.S",N_SO,0,0,0f
0:
_GLOBAL(DoSyscall)
stw r0,THREAD+LAST_SYSCALL(r2)
stw r3,ORIG_GPR3(r1)
li r12,0
stw r12,RESULT(r1)
lwz r11,_CCR(r1) /* Clear SO bit in CR */
lis r10,0x1000
andc r11,r11,r10
rlwinm r11,r11,0,4,2
stw r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK
lis r31,show_syscalls_task@ha
lwz r31,show_syscalls_task@l(r31)
cmp 0,r2,r31
bne 1f
#endif
lis r3,7f@ha
addi r3,r3,7f@l
lwz r4,GPR0(r1)
lwz r5,GPR3(r1)
lwz r6,GPR4(r1)
lwz r7,GPR5(r1)
lwz r8,GPR6(r1)
lwz r9,GPR7(r1)
bl printk
lis r3,77f@ha
addi r3,r3,77f@l
lwz r4,GPR8(r1)
lwz r5,GPR9(r1)
mr r6,r2
bl printk
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
1:
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
cmpi 0,r0,0x7777 /* Special case for 'sys_sigreturn' */
beq- 10f
cmpi 0,r0,0x6666 /* Special case for 'sys_rt_sigreturn' */
beq- 16f
cmpli 0,r0,NR_syscalls
bge- 66f
rlwinm r10,r1,0,0,18 /* current_thread_info() */
lwz r10,TI_FLAGS(r10)
andi. r10,r10,_TIF_SYSCALL_TRACE
bne- 50f
cmpli 0,r0,NR_syscalls
bge- 66f
bne- syscall_dotrace
syscall_dotrace_cont:
lis r10,sys_call_table@h
ori r10,r10,sys_call_table@l
slwi r0,r0,2
lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
cmpi 0,r10,0
beq- 66f
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
blrl /* Call handler */
.globl ret_from_syscall_1
ret_from_syscall_1:
20: stw r3,RESULT(r1) /* Save result */
.globl ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK
cmp 0,r2,r31
bne 91f
bl do_show_syscall_exit
#endif
mr r4,r3
lis r3,79f@ha
addi r3,r3,79f@l
bl printk
lwz r3,RESULT(r1)
91:
#endif
li r10,-_LAST_ERRNO
cmpl 0,r3,r10
blt 30f
mr r6,r3
li r11,-_LAST_ERRNO
cmpl 0,r3,r11
blt+ 30f
neg r3,r3
cmpi 0,r3,ERESTARTNOHAND
bne 22f
@@ -202,24 +154,50 @@ ret_from_syscall_1:
22: lwz r10,_CCR(r1) /* Set SO bit in CR */
oris r10,r10,0x1000
stw r10,_CCR(r1)
30: stw r3,GPR3(r1) /* Update return value */
b ret_from_except
/* disable interrupts so current_thread_info()->flags can't change */
30: li r10,MSR_KERNEL /* doesn't include MSR_EE */
SYNC
MTMSRD(r10)
rlwinm r12,r1,0,0,18 /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
andi. r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne- syscall_exit_work
syscall_exit_cont:
PPC405_ERR77(0,r1)
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
lwz r8,_MSR(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
mtspr SRR0,r7
mtspr SRR1,r8
SYNC
RFI
66: li r3,ENOSYS
b 22b
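The exit path above implements the usual PPC syscall return convention. A minimal C sketch, not part of the patch, with placeholder constants and a hypothetical sketch_regs type standing in for the real pt_regs and errno definitions:

#define SKETCH_LAST_ERRNO      516          /* placeholder for _LAST_ERRNO */
#define SKETCH_ERESTARTNOHAND  514          /* placeholder for ERESTARTNOHAND */
#define SKETCH_EINTR           4            /* placeholder for EINTR */
#define SKETCH_CR0_SO          0x10000000UL /* the bit set by "oris r10,r10,0x1000" */

struct sketch_regs { unsigned long gpr3; unsigned long ccr; };

/* What the code between the handler's return and syscall_exit_cont does with r3. */
static void sketch_syscall_exit(struct sketch_regs *regs, long ret)
{
        if ((unsigned long)ret >= (unsigned long)-SKETCH_LAST_ERRNO) {
                ret = -ret;                         /* hand user space a positive errno */
                if (ret == SKETCH_ERESTARTNOHAND)
                        ret = SKETCH_EINTR;         /* no automatic restart: report EINTR */
                regs->ccr |= SKETCH_CR0_SO;         /* CR0.SO tells the libc stub it failed */
        }
        regs->gpr3 = ret;                           /* the value user space sees in r3 */
}

An out-of-range syscall number takes the 66: path, which loads a positive ENOSYS and joins this sequence at the point where CR0.SO is set.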
/* sys_sigreturn */
10: addi r3,r1,STACK_FRAME_OVERHEAD
bl sys_sigreturn
cmpi 0,r3,0 /* Check for restarted system call */
bge ret_from_except
b 20b
/* sys_rt_sigreturn */
16: addi r3,r1,STACK_FRAME_OVERHEAD
bl sys_rt_sigreturn
cmpi 0,r3,0 /* Check for restarted system call */
bge ret_from_except
b 20b
.globl ret_from_fork
ret_from_fork:
REST_NVGPRS(r1)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
bl schedule_tail
#endif
li r3,0
b ret_from_syscall
/* Traced system call support */
50: bl do_syscall_trace
syscall_dotrace:
SAVE_NVGPRS(r1)
li r0,0xc00
stw r0,TRAP(r1)
bl do_syscall_trace
lwz r0,GPR0(r1) /* Restore original registers */
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
@@ -227,43 +205,167 @@ ret_from_syscall_1:
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
lwz r9,GPR9(r1)
cmpli 0,r0,NR_syscalls
bge- 66f
lis r10,sys_call_table@h
ori r10,r10,sys_call_table@l
slwi r0,r0,2
lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
cmpi 0,r10,0
beq- 66f
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
blrl /* Call handler */
.globl ret_from_syscall_2
ret_from_syscall_2:
stw r3,RESULT(r1) /* Save result */
stw r3,GPR0(r1) /* temporary gross hack to make strace work */
li r10,-_LAST_ERRNO
cmpl 0,r3,r10
blt 60f
neg r3,r3
cmpi 0,r3,ERESTARTNOHAND
bne 52f
li r3,EINTR
52: lwz r10,_CCR(r1) /* Set SO bit in CR */
oris r10,r10,0x1000
stw r10,_CCR(r1)
60: stw r3,GPR3(r1) /* Update return value */
REST_NVGPRS(r1)
b syscall_dotrace_cont
syscall_exit_work:
stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
andi. r0,r9,_TIF_SYSCALL_TRACE
beq 5f
stw r6,GPR0(r1) /* temporary gross hack to make strace work */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
lwz r4,TRAP(r1)
andi. r4,r4,1
beq 4f
SAVE_NVGPRS(r1)
li r4,0xc00
stw r4,TRAP(r1)
4:
bl do_syscall_trace
b ret_from_except
66: li r3,ENOSYS
b 52b
REST_NVGPRS(r1)
2:
lwz r3,GPR3(r1)
li r10,MSR_KERNEL /* doesn't include MSR_EE */
SYNC
MTMSRD(r10) /* disable interrupts again */
rlwinm r12,r1,0,0,18 /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
5:
andi. r0,r9,_TIF_NEED_RESCHED
bne 1f
lwz r5,_MSR(r1)
andi. r5,r5,MSR_PR
beq syscall_exit_cont
andi. r0,r9,_TIF_SIGPENDING
beq syscall_exit_cont
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
b syscall_do_signal
1:
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
bl schedule
b 2b
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
lis r11,show_syscalls_task@ha
lwz r11,show_syscalls_task@l(r11)
cmp 0,r2,r11
bnelr
#endif
stw r31,GPR31(r1)
mflr r31
lis r3,7f@ha
addi r3,r3,7f@l
lwz r4,GPR0(r1)
lwz r5,GPR3(r1)
lwz r6,GPR4(r1)
lwz r7,GPR5(r1)
lwz r8,GPR6(r1)
lwz r9,GPR7(r1)
bl printk
lis r3,77f@ha
addi r3,r3,77f@l
lwz r4,GPR8(r1)
mr r5,r2
bl printk
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
mtlr r31
lwz r31,GPR31(r1)
blr
do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
lis r11,show_syscalls_task@ha
lwz r11,show_syscalls_task@l(r11)
cmp 0,r2,r11
bnelr
#endif
stw r31,GPR31(r1)
mflr r31
stw r3,RESULT(r1) /* Save result */
mr r4,r3
lis r3,79f@ha
addi r3,r3,79f@l
bl printk
lwz r3,RESULT(r1)
mtlr r31
lwz r31,GPR31(r1)
blr
7: .string "syscall %d(%x, %x, %x, %x, %x, "
77: .string "%x, %x), current=%p\n"
77: .string "%x), current=%p\n"
79: .string " -> %x\n"
.align 2,0
#ifdef SHOW_SYSCALLS_TASK
.data
.globl show_syscalls_task
show_syscalls_task:
.long -1
.text
#endif
#endif /* SHOW_SYSCALLS */
/*
* The sigsuspend and rt_sigsuspend system calls can call do_signal
* and thus put the process into the stopped state where we might
* want to examine its user state with ptrace. Therefore we need
* to save all the nonvolatile registers (r13 - r31) before calling
* the C code.
*/
.globl ppc_sigsuspend
ppc_sigsuspend:
SAVE_NVGPRS(r1)
lwz r0,TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,TRAP(r1) /* register set saved */
b sys_sigsuspend
.globl ppc_rt_sigsuspend
ppc_rt_sigsuspend:
SAVE_NVGPRS(r1)
lwz r0,TRAP(r1)
rlwinm r0,r0,0,0,30
stw r0,TRAP(r1)
b sys_rt_sigsuspend
.globl ppc_fork
ppc_fork:
SAVE_NVGPRS(r1)
lwz r0,TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,TRAP(r1) /* register set saved */
b sys_fork
.globl ppc_vfork
ppc_vfork:
SAVE_NVGPRS(r1)
lwz r0,TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,TRAP(r1) /* register set saved */
b sys_vfork
.globl ppc_clone
ppc_clone:
SAVE_NVGPRS(r1)
lwz r0,TRAP(r1)
rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
stw r0,TRAP(r1) /* register set saved */
b sys_clone
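ppc_sigsuspend, ppc_fork, ppc_vfork and ppc_clone all repeat the same three-instruction prologue, and syscall_exit_work/syscall_do_signal test the same bit before calling C code. The convention: the low bit of the TRAP word in the exception frame is set while only the volatile registers are saved, and cleared once SAVE_NVGPRS has completed the frame. A small C sketch of that rule (illustrative only; sketch_save_nvgprs() is a hypothetical stand-in for the SAVE_NVGPRS macro):

struct sketch_frame { unsigned long trap; /* vector number; low bit set => r13-r31 not saved */ };

static void sketch_save_nvgprs(struct sketch_frame *f) { (void)f; /* would spill r13-r31 */ }

static void sketch_ensure_full_regs(struct sketch_frame *f)
{
        if (f->trap & 1) {                /* frame holds only the volatile GPRs so far */
                sketch_save_nvgprs(f);    /* complete it before C code can look at it */
                f->trap &= ~1UL;          /* mark the register set as full */
        }
}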
/*
* This routine switches between two different tasks. The process
@@ -290,25 +392,26 @@ _GLOBAL(_switch)
stwu r1,-INT_FRAME_SIZE(r1)
mflr r0
stw r0,INT_FRAME_SIZE+4(r1)
/* r3-r13 are caller saved -- Cort */
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
mfmsr r22
mfmsr r11
li r0,MSR_FP /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r0,r0,MSR_VEC@h /* Disable altivec */
mfspr r12,SPRN_VRSAVE /* save vrsave register value */
stw r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
and. r0,r0,r22 /* FP or altivec enabled? */
and. r0,r0,r11 /* FP or altivec enabled? */
beq+ 1f
andc r22,r22,r0
mtmsr r22
andc r11,r11,r0
MTMSRD(r11)
isync
1: stw r22,_MSR(r1)
mfcr r20
stw r20,_CCR(r1)
1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
tophys(r0,r4)
@@ -318,148 +421,247 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* save the old current 'last' for return value */
mr r3,r2
addi r2,r4,-THREAD /* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_VRSAVE(r2)
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r13 are destroyed -- Cort */
REST_2GPRS(14, r1)
REST_8GPRS(16, r1)
REST_8GPRS(24, r1)
/* r3-r12 are destroyed -- Cort */
REST_NVGPRS(r1)
lwz r4,_NIP(r1) /* Return to _switch caller in new task */
mtlr r4
addi r1,r1,INT_FRAME_SIZE
blr
.globl ret_from_fork
ret_from_fork:
#if CONFIG_SMP || CONFIG_PREEMPT
bl schedule_tail
#endif
rlwinm r3,r1,0,0,18
lwz r3,TI_FLAGS(r3)
andi. r0,r3,_TIF_SYSCALL_TRACE
.globl sigreturn_exit
sigreturn_exit:
subi r1,r3,STACK_FRAME_OVERHEAD
rlwinm r12,r1,0,0,18 /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
andi. r0,r9,_TIF_SYSCALL_TRACE
bnel- do_syscall_trace
b ret_from_except
/* fall through */
.globl ret_from_except_full
ret_from_except_full:
REST_NVGPRS(r1)
/* fall through */
.globl ret_from_intercept
ret_from_intercept:
/*
* We may be returning from RTL and cannot do the normal checks
* -- Cort
*/
cmpi 0,r3,0
beq restore
.globl ret_from_except
ret_from_except:
REST_10GPRS(13,r1)
REST_8GPRS(23,r1)
REST_GPR(31,r1)
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
recheck:
mfmsr r10
rlwinm r0,r10,0,17,15 /* clear MSR_EE in r0 */
#ifdef CONFIG_4xx
rlwinm r0,r0,0,23,21 /* clear MSR_DE in r0 */
#endif
li r10,MSR_KERNEL /* doesn't include EE */
SYNC /* Some chip revs have problems here... */
mtmsr r0 /* Update machine state */
MTMSRD(r10) /* disable interrupts */
lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
beq+ restore /* if not, just restore regs and return */
beq resume_kernel
/* Check current_thread_info()->flags */
rlwinm r3,r1,0,0,18
lwz r3,TI_FLAGS(r3)
andi. r0,r3,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
rlwinm r9,r1,0,0,18
lwz r9,TI_FLAGS(r9)
andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne do_work
.globl ret_to_user_hook
ret_to_user_hook:
nop
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_VRSAVE(r2)
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
addi r0,r1,INT_FRAME_SIZE /* size of frame */
stw r0,THREAD+KSP(r2) /* save kernel stack pointer */
#ifdef CONFIG_PREEMPT
b restore
#ifndef CONFIG_PPC_ISERIES
tophys(r8,r1)
CLR_TOP32(r8)
mtspr SPRG2,r8 /* phys exception stack pointer */
#else /* CONFIG_PPC_ISERIES */
mfspr r2,SPRG1 /* Get Paca address */
stw r1,PACAKSAVE(r2) /* save exception stack pointer */
#endif /* CONFIG_PPC_ISERIES */
resume_kernel:
rlwinm r9,r1,0,0,18 /* check current_thread_info->preempt_count */
lwz r3,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne do_work
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
/* interrupts are hard-disabled at this point */
restore:
REST_8GPRS(4, r1)
REST_GPR(12, r1)
lwz r3,_XER(r1)
mtspr XER,r3
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
REST_2GPRS(7, r1)
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr XER,r10
mtctr r11
PPC405_ERR77(0,r1)
stwcx. r0,0,r1 /* to clear the reservation */
lwz r3,_CTR(r1)
lwz r0,_LINK(r1)
mtctr r3
mtlr r0
#ifndef CONFIG_4xx
lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
lwz r0,_MSR(r1)
lwz r3,_CCR(r1)
FIX_SRR1(r0,r2)
lwz r2,_NIP(r1)
mtcrf 0xFF,r3
lwz r10,_CCR(r1)
lwz r11,_LINK(r1)
mtcrf 0xFF,r10
mtlr r11
/*
* We can't afford to take an exception between setting SRR0/1
* and the rfi. Since GPR0(r1) .. GPR3(r1) are in the same cache
* line, loading r3 here should mean that we should have a HPTE
* (for classic PPC) or TLB entry (for 4xx/8xx) for that cache
* line, even if it isn't covered by a BAT register.
* In addition, the cache line itself will be in L1 cache.
* There is still the possibility of the HPTE getting evicted
* on SMP systems.
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
* exception will trash SRR0 and SRR1. Therefore we clear the
* MSR:RI bit to indicate this. If we do take an exception,
* we can't return to the point of the exception but we
* can restart the exception exit path at the label
* exc_exit_restart below. -- paulus
*/
lwz r3,GPR3(r1)
mtspr SRR1,r0
mtspr SRR0,r2
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
li r10,MSR_KERNEL & ~MSR_RI
SYNC
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SRR0,r12
mtspr SRR1,r9
REST_4GPRS(9, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:
SYNC
PPC405_ERR77_SYNC
RFI
#else /* CONFIG_4xx */
/*
* This is a bit different on 4xx because 4xx doesn't have
* the RI bit in the MSR, and because we have critical
* exceptions, for which we need to restore SRR0 and SRR1
* and then use SRR2/SRR3 to return from the exception.
* The TLB miss handler checks if we have interrupted
* the exception exit path and restarts it if so.
*/
lwz r10,TRAP(r1) /* check for critical exception */
lwz r11,_LINK(r1)
andi. r10,r10,2
mtlr r11
lwz r10,_CCR(r1)
bne crit_exc_exit
mtcrf 0xff,r10
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
exc_exit_start:
mtspr SRR0,r11
mtspr SRR1,r12
REST_2GPRS(11, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:
PPC405_ERR77_SYNC
rfi
crit_exc_exit:
mtcrf 0xff,r10
/* avoid any possible TLB misses here by turning off MSR:DR; we
* assume the instructions here are mapped by a pinned TLB entry */
li r10,MSR_IR
mtmsr r10
isync
tophys(r1, r1)
lwz r9,_SRR0(r1)
lwz r10,_SRR1(r1)
mtspr SRR0,r9
lwz r11,_NIP(r1)
mtspr SRR1,r10
lwz r12,_MSR(r1)
mtspr SRR2,r11
mtspr SRR3,r12
REST_4GPRS(9, r1)
lwz r1,GPR1(r1)
PPC405_ERR77_SYNC
rfci
#endif /* CONFIG_4xx */
recheck:
li r10,MSR_KERNEL
SYNC
MTMSRD(r10) /* disable interrupts */
rlwinm r9,r1,0,0,18
lwz r9,TI_FLAGS(r9)
#ifdef CONFIG_PREEMPT
lwz r0,_MSR(r1)
li r11,_TIF_NEED_RESCHED
/* move MSR_PR bit down to TIF_SIGPENDING (0x4) bit */
rlwimi r11,r0,18+TIF_SIGPENDING,31-TIF_SIGPENDING,31-TIF_SIGPENDING
and. r0,r9,r11
#else /* CONFIG_PREEMPT */
andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
#endif /* CONFIG_PREEMPT */
beq restore
do_work:
ori r10,r10,MSR_EE
SYNC
mtmsr r10 /* hard-enable interrupts */
andi. r0,r3,_TIF_NEED_RESCHED
MTMSRD(r10) /* hard-enable interrupts */
andi. r0,r9,_TIF_NEED_RESCHED
beq 1f
bl schedule
b recheck
1:
andi. r0,r3,_TIF_SIGPENDING
syscall_do_signal:
/* save r13-r31 in the exception frame, if not already done */
lwz r3,TRAP(r1)
andi. r0,r3,1
beq 2f
li r3,0
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,TRAP(r1)
2: li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
bl do_signal
REST_NVGPRS(r1)
b recheck
2:
/* nobody uses the TIF_NOTIFY_RESUME bit yet */
b recheck
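Taken together, recheck, resume_kernel and do_work implement the loop below; a rough C sketch, not part of the patch, with hypothetical sketch_* helpers standing in for the flag, preempt-count and interrupt-state accessors. The only CONFIG_PREEMPT-specific part is that a kernel-mode return may also reschedule, provided preempt_count is zero.

#define SKETCH_TIF_SIGPENDING    0x4   /* matches the "(0x4)" noted in the rlwimi comment */
#define SKETCH_TIF_NEED_RESCHED  0x8   /* placeholder bit */

extern void sketch_irq_disable(void), sketch_irq_enable(void), sketch_schedule(void);
extern unsigned long sketch_ti_flags(void);      /* current_thread_info()->flags */
extern unsigned long sketch_preempt_count(void); /* current_thread_info()->preempt_count */
extern void sketch_do_signal(void);

static void sketch_exception_return_work(int returning_to_user)
{
        unsigned long flags;

        for (;;) {
                sketch_irq_disable();                /* so the flags can't change under us */
                flags = sketch_ti_flags();
                if ((flags & SKETCH_TIF_NEED_RESCHED) &&
                    (returning_to_user || sketch_preempt_count() == 0)) {
                        sketch_irq_enable();
                        sketch_schedule();           /* may preempt the kernel */
                        continue;                    /* b recheck */
                }
                if (returning_to_user && (flags & SKETCH_TIF_SIGPENDING)) {
                        sketch_irq_enable();
                        sketch_do_signal();
                        continue;                    /* b recheck */
                }
                return;                              /* fall into restore with IRQs off */
        }
}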
/*
* We come here when we are at the end of handling an exception
* that occurred at a place where taking an exception will lose
* state information, such as the contents of SRR0 and SRR1.
*/
nonrecoverable:
lis r10,exc_exit_restart_end@ha
addi r10,r10,exc_exit_restart_end@l
cmplw r12,r10
bge 3f
lis r11,exc_exit_restart@ha
addi r11,r11,exc_exit_restart@l
cmplw r12,r11
blt 3f
lis r10,ee_restarts@ha
lwz r12,ee_restarts@l(r10)
addi r12,r12,1
stw r12,ee_restarts@l(r10)
mr r12,r11 /* restart at exc_exit_restart */
blr
3: /* OK, we can't recover, kill this process */
lwz r3,TRAP(r1)
andi. r0,r3,1
beq 4f
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,TRAP(r1)
4: addi r3,r1,STACK_FRAME_OVERHEAD
bl nonrecoverable_exception
/* shouldn't return */
b 4b
.comm ee_restarts,4
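In C, the fixup that nonrecoverable applies is roughly the following (illustrative only; the externs stand in for the linker symbols above and for the nonrecoverable_exception() path):

extern char exc_exit_restart[], exc_exit_restart_end[];
extern unsigned int ee_restarts;
extern void sketch_kill_current(void);   /* stand-in for the nonrecoverable_exception() path */

/* Returns the NIP to resume at, given the NIP at which the nested exception hit. */
static unsigned long sketch_fixup_nonrecoverable(unsigned long nip)
{
        if (nip >= (unsigned long)exc_exit_restart &&
            nip <  (unsigned long)exc_exit_restart_end) {
                ee_restarts++;                            /* statistics only */
                return (unsigned long)exc_exit_restart;   /* replay the whole exit sequence */
        }
        sketch_kill_current();                            /* SRR0/SRR1 are gone; no way back */
        return nip;
}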
/*
* PROM code for specific machines follows. Put it
@@ -472,39 +674,43 @@ do_work:
* called with the MMU off.
*/
_GLOBAL(enter_rtas)
stwu r1,-INT_FRAME_SIZE(r1)
mflr r0
stw r0,20(r1)
stw r0,INT_FRAME_SIZE+4(r1)
lis r4,rtas_data@ha
lwz r4,rtas_data@l(r4)
lis r6,1f@ha /* physical return address for rtas */
addi r6,r6,1f@l
addis r6,r6,-KERNELBASE@h
subi r7,r1,INT_FRAME_SIZE
addis r7,r7,-KERNELBASE@h
tophys(r6,r6)
tophys(r7,r1)
lis r8,rtas_entry@ha
lwz r8,rtas_entry@l(r8)
mfmsr r9
stw r9,8(r1)
li r0,0
ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_FE0|MSR_FE1
andc r0,r9,r0
li r10,MSR_IR|MSR_DR|MSR_FP
andc r9,r0,r10
li r0,MSR_KERNEL
SYNC /* disable interrupts so SRR0/1 */
mtmsr r0 /* don't get trashed */
MTMSRD(r0) /* don't get trashed */
li r9,MSR_
mtlr r6
CLR_TOP32(r7)
mtspr SPRG2,r7
mtspr SRR0,r8
mtspr SRR1,r9
RFI
1: addis r9,r1,-KERNELBASE@h
lwz r8,20(r9) /* get return address */
1: tophys(r9,r1)
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
lwz r9,8(r9) /* original msr value */
FIX_SRR1(r9,r0)
addi r1,r1,INT_FRAME_SIZE
li r0,0
mtspr SPRG2,r0
mtspr SRR0,r8
mtspr SRR1,r9
RFI /* return to caller */
.globl machine_check_in_rtas
machine_check_in_rtas:
twi 31,0,0
/* XXX load up BATs and panic */
#endif /* CONFIG_ALL_PPC */
@@ -233,55 +233,92 @@ __secondary_hold:
* task's thread_struct.
*/
#define EXCEPTION_PROLOG \
mtspr SPRG0,r20; \
mtspr SPRG1,r21; \
mfcr r20; \
mfspr r21,SPRG2; /* exception stack to use from */ \
cmpwi 0,r21,0; /* user mode or RTAS */ \
bne 1f; \
tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
1: CLR_TOP32(r21); \
stw r20,_CCR(r21); /* save registers */ \
stw r22,GPR22(r21); \
stw r23,GPR23(r21); \
mfspr r20,SPRG0; \
stw r20,GPR20(r21); \
mfspr r22,SPRG1; \
stw r22,GPR21(r21); \
mflr r20; \
stw r20,_LINK(r21); \
mfctr r22; \
stw r22,_CTR(r21); \
mfspr r20,XER; \
stw r20,_XER(r21); \
mfspr r22,SRR0; \
mfspr r23,SRR1; \
stw r0,GPR0(r21); \
stw r1,GPR1(r21); \
stw r2,GPR2(r21); \
stw r1,0(r21); \
tovirt(r1,r21); /* set new kernel sp */ \
SAVE_4GPRS(3, r21); \
SAVE_GPR(7, r21);
mtspr SPRG0,r10; \
mtspr SPRG1,r11; \
mfcr r10; \
EXCEPTION_PROLOG_1; \
EXCEPTION_PROLOG_2
#define EXCEPTION_PROLOG_1 \
mfspr r11,SRR1; /* check whether user or kernel */ \
andi. r11,r11,MSR_PR; \
tophys(r11,r1); /* use tophys(r1) if kernel */ \
beq 1f; \
mfspr r11,SPRG3; \
lwz r11,THREAD_INFO-THREAD(r11); \
addi r11,r11,THREAD_SIZE; \
tophys(r11,r11); \
1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
#define EXCEPTION_PROLOG_2 \
CLR_TOP32(r11); \
stw r10,_CCR(r11); /* save registers */ \
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mfspr r10,SPRG0; \
stw r10,GPR10(r11); \
mfspr r12,SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r12,SRR0; \
mfspr r9,SRR1; \
stw r1,GPR1(r11); \
stw r1,0(r11); \
tovirt(r1,r11); /* set new kernel sp */ \
li r10,MSR_; /* can now take exceptions again */ \
MTMSRD(r10); /* (except for mach check in rtas) */ \
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
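EXCEPTION_PROLOG_1 replaces the old SPRG2-based scheme for finding the exception stack: if the exception came from user mode, the frame is carved off the top of the task's kernel stack (found through the thread_info pointer), otherwise it goes just below the current r1. A C sketch of that choice, with placeholder constants and a hypothetical sketch_tophys() standing in for the tophys macro:

#define SKETCH_THREAD_SIZE     8192   /* placeholder for THREAD_SIZE */
#define SKETCH_INT_FRAME_SIZE  192    /* placeholder for INT_FRAME_SIZE */

extern unsigned long sketch_tophys(unsigned long va);   /* virtual -> physical */

static unsigned long sketch_pick_exception_frame(int from_user,
                                                 unsigned long thread_info_va,
                                                 unsigned long current_r1)
{
        unsigned long sp;

        if (from_user)                                        /* MSR_PR set in SRR1 */
                sp = sketch_tophys(thread_info_va + SKETCH_THREAD_SIZE);
        else
                sp = sketch_tophys(current_r1);               /* stay on the kernel stack */
        return sp - SKETCH_INT_FRAME_SIZE;                    /* room for the new frame */
}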
/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r21, r22 (SRR0), and r23 (SRR1).
* r11, r12 (SRR0), and r9 (SRR1).
*
* Note2: once we have set r1 we are in a position to take exceptions
* again, and we could thus set MSR:RI at that point.
*/
/*
* Exception vectors.
*/
#define STD_EXCEPTION(n, label, hdlr) \
#define EXCEPTION(n, label, hdlr, xfer) \
. = n; \
label: \
EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r20,MSR_KERNEL; \
bl transfer_to_handler; \
i##n: \
.long hdlr; \
.long ret_from_except
xfer(n, hdlr)
#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
li r10,trap; \
stw r10,TRAP(r11); \
li r10,MSR_KERNEL; \
copyee(r10, r9); \
bl tfer; \
i##n: \
.long hdlr; \
.long ret
#define COPY_EE(d, s) rlwimi d,s,0,16,16
#define NOCOPY(d, s)
#define EXC_XFER_STD(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
ret_from_except)
#define EXC_XFER_EE(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_EE_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
ret_from_except)
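The EXC_XFER_* wrappers encode two independent choices: whether the handler needs the full register set (the _LITE forms pass n+1, setting the low bit of TRAP and routing through transfer_to_handler/ret_from_except instead of the _full variants), and whether the handler should run with external interrupts in the same state as the interrupted code (COPY_EE copies the EE bit from the saved SRR1 into MSR_KERNEL). A C sketch of the values being built, with placeholder MSR constants (illustrative only):

#define SKETCH_MSR_EE      0x8000u   /* the bit COPY_EE's rlwimi copies */
#define SKETCH_MSR_KERNEL  0x1000u   /* placeholder for MSR_KERNEL */

struct sketch_xfer {
        unsigned int trap;          /* stored in TRAP(r11); low bit => "lite" frame */
        unsigned int handler_msr;   /* loaded into r10 before bl transfer_to_handler* */
};

static struct sketch_xfer sketch_exc_xfer(unsigned int vector, unsigned int saved_srr1,
                                          int copy_ee, int lite)
{
        struct sketch_xfer x;

        x.trap = vector + (lite ? 1u : 0u);   /* n for _STD/_EE, n+1 for _LITE/_EE_LITE */
        x.handler_msr = SKETCH_MSR_KERNEL;
        if (copy_ee)                          /* keep interrupts off if they were off */
                x.handler_msr = (x.handler_msr & ~SKETCH_MSR_EE) | (saved_srr1 & SKETCH_MSR_EE);
        return x;
}

For example, EXC_XFER_EE_LITE(0xc00, DoSyscall) stores 0xc01 in TRAP and preserves the interrupted EE state, which is what the system call vector below uses.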
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
@@ -290,11 +327,43 @@ i##n: \
. = 0x100
b __secondary_start_gemini
#else
STD_EXCEPTION(0x100, Reset, UnknownException)
EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
#endif
/* Machine check */
STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
/*
* On CHRP, this is complicated by the fact that we could get a
* machine check inside RTAS, and we have no guarantee that certain
* critical registers will have the values we expect. The set of
* registers that might have bad values includes all the GPRs
* and all the BATs. We indicate that we are in RTAS by putting
* a non-zero value, the address of the exception frame to use,
* in SPRG2. The machine check handler checks SPRG2 and uses its
* value if it is non-zero. If we ever needed to free up SPRG2,
* we could use a field in the thread_info or thread_struct instead.
* (Other exception handlers assume that r1 is a valid kernel stack
* pointer when we take an exception from supervisor mode.)
* -- paulus.
*/
. = 0x200
MachineCheck:
mtspr SPRG0,r10
mtspr SPRG1,r11
mfcr r10
#ifdef CONFIG_ALL_PPC
mfspr r11,SPRG2
cmpwi 0,r11,0
bne 7f
#endif /* CONFIG_ALL_PPC */
EXCEPTION_PROLOG_1
7: EXCEPTION_PROLOG_2
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALL_PPC
mfspr r4,SPRG2
cmpwi cr1,r4,0
bne cr1,machine_check_in_rtas
#endif
EXC_XFER_STD(0x200, MachineCheckException)
/* Data access exception. */
. = 0x300
@@ -305,37 +374,24 @@ DataAccessCont:
DataAccess:
EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
mfspr r20,DSISR
andis. r0,r20,0xa470 /* weird error? */
mfspr r10,DSISR
andis. r0,r10,0xa470 /* weird error? */
bne 1f /* if not, try to put a PTE */
mfspr r4,DAR /* into the hash table */
rlwinm r3,r20,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
bl hash_page
1: stw r20,_DSISR(r21)
mr r5,r20
1: stw r10,_DSISR(r11)
mr r5,r10
mfspr r4,DAR
stw r4,_DAR(r21)
stw r4,_DAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
i0x300:
.long do_page_fault
.long ret_from_except
andi. r0,r9,MSR_PR /* set cr0.eq if from kernel */
EXC_XFER_EE_LITE(0x300, do_page_fault)
#ifdef CONFIG_PPC64BRIDGE
/* SLB fault on data access. */
. = 0x380
b DataSegment
DataSegmentCont:
mfspr r4,DAR
stw r4,_DAR(r21)
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
.long UnknownException
.long ret_from_except
#endif /* CONFIG_PPC64BRIDGE */
/* Instruction access exception. */
@@ -347,135 +403,79 @@ InstructionAccessCont:
InstructionAccess:
EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
andis. r0,r23,0x4000 /* no pte found? */
andis. r0,r9,0x4000 /* no pte found? */
beq 1f /* if so, try to put a PTE */
li r3,0 /* into the hash table */
mr r4,r22 /* SRR0 is fault address */
mr r4,r12 /* SRR0 is fault address */
bl hash_page
1: addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r22
mr r5,r23
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
i0x400:
.long do_page_fault
.long ret_from_except
mr r4,r12
mr r5,r9
andi. r0,r9,MSR_PR /* set cr0.eq if from kernel */
EXC_XFER_EE_LITE(0x400, do_page_fault)
#ifdef CONFIG_PPC64BRIDGE
/* SLB fault on instruction access. */
. = 0x480
b InstructionSegment
InstructionSegmentCont:
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
.long UnknownException
.long ret_from_except
#endif /* CONFIG_PPC64BRIDGE */
/* External interrupt */
. = 0x500;
HardwareInterrupt:
EXCEPTION_PROLOG;
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
li r4,0
bl transfer_to_handler
.globl do_IRQ_intercept
do_IRQ_intercept:
.long do_IRQ;
.long ret_from_intercept
EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
/* Alignment exception */
. = 0x600
Alignment:
EXCEPTION_PROLOG
mfspr r4,DAR
stw r4,_DAR(r21)
stw r4,_DAR(r11)
mfspr r5,DSISR
stw r5,_DSISR(r21)
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
i0x600:
.long AlignmentException
.long ret_from_except
EXC_XFER_EE(0x600, AlignmentException)
/* Program check exception */
. = 0x700
ProgramCheck:
EXCEPTION_PROLOG
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
i0x700:
.long ProgramCheckException
.long ret_from_except
EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_EE)
/* Floating-point unavailable */
. = 0x800
FPUnavailable:
EXCEPTION_PROLOG
bne load_up_fpu /* if from user, just load it up */
li r20,MSR_KERNEL
bl transfer_to_handler /* if from kernel, take a trap */
i0x800:
.long KernelFP
.long ret_from_except
. = 0x900
Decrementer:
EXCEPTION_PROLOG
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
bl transfer_to_handler
.globl timer_interrupt_intercept
timer_interrupt_intercept:
.long timer_interrupt
.long ret_from_intercept
EXC_XFER_EE_LITE(0x800, KernelFP)
/* Decrementer */
EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
/* System call */
. = 0xc00
SystemCall:
EXCEPTION_PROLOG
stw r3,ORIG_GPR3(r21)
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
bl transfer_to_handler
.long DoSyscall
.long ret_from_except
EXC_XFER_EE_LITE(0xc00, DoSyscall)
/* Single step - not used on 601 */
STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_EE)
EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
/*
* The Altivec unavailable trap is at 0x0f20. Foo.
* We effectively remap it to 0x3000.
*/
. = 0xf00
b Trap_0f
trap_0f_cont:
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
bl transfer_to_handler
.long UnknownException
.long ret_from_except
. = 0xf20
#ifdef CONFIG_ALTIVEC
b Trap_0f
. = 0xf20
b AltiVecUnavailable
#endif
Trap_0f:
#endif
EXCEPTION_PROLOG
b trap_0f_cont
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE(0xf00, UnknownException)
/*
* Handle TLB miss for instruction on 603/603e.
@@ -677,35 +677,35 @@ DataStoreTLBMiss:
mtcrf 0x80,r3
rfi
STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
STD_EXCEPTION(0x1400, SMI, SMIException)
STD_EXCEPTION(0x1500, Trap_15, UnknownException)
STD_EXCEPTION(0x1600, Trap_16, UnknownException)
STD_EXCEPTION(0x1700, Trap_17, TAUException)
STD_EXCEPTION(0x1800, Trap_18, UnknownException)
STD_EXCEPTION(0x1900, Trap_19, UnknownException)
STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
STD_EXCEPTION(0x2000, RunMode, RunModeException)
STD_EXCEPTION(0x2100, Trap_21, UnknownException)
STD_EXCEPTION(0x2200, Trap_22, UnknownException)
STD_EXCEPTION(0x2300, Trap_23, UnknownException)
STD_EXCEPTION(0x2400, Trap_24, UnknownException)
STD_EXCEPTION(0x2500, Trap_25, UnknownException)
STD_EXCEPTION(0x2600, Trap_26, UnknownException)
STD_EXCEPTION(0x2700, Trap_27, UnknownException)
STD_EXCEPTION(0x2800, Trap_28, UnknownException)
STD_EXCEPTION(0x2900, Trap_29, UnknownException)
STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
EXCEPTION(0x2f00, Trap_2f, UnknownException, EXC_XFER_EE)
. = 0x3000
@@ -713,25 +713,29 @@ DataStoreTLBMiss:
AltiVecUnavailable:
EXCEPTION_PROLOG
bne load_up_altivec /* if from user, just load it up */
li r20,MSR_KERNEL
bl transfer_to_handler /* if from kernel, take a trap */
.long KernelAltiVec
.long ret_from_except
EXC_XFER_EE_LITE(0xf20, KernelAltiVec)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64BRIDGE
DataAccess:
EXCEPTION_PROLOG
b DataAccessCont
InstructionAccess:
EXCEPTION_PROLOG
b InstructionAccessCont
DataSegment:
EXCEPTION_PROLOG
b DataSegmentCont
addi r3,r1,STACK_FRAME_OVERHEAD
mfspr r4,DAR
stw r4,_DAR(r11)
EXC_XFER_STD(0x380, UnknownException)
InstructionSegment:
EXCEPTION_PROLOG
b InstructionSegmentCont
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0x480, UnknownException)
#endif /* CONFIG_PPC64BRIDGE */
/*
......@@ -769,16 +773,16 @@ load_up_fpu:
lwz r5,PT_REGS(r4)
add r5,r5,r6
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r20,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r20 /* disable FP for previous task */
li r10,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r10 /* disable FP for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
mfspr r5,SPRG3 /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r23,r23,MSR_FP /* enable FP for current */
or r23,r23,r4
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
lfd fr0,THREAD_FPSCR-4(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
@@ -788,21 +792,57 @@ load_up_fpu:
stw r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
lwz r3,_CCR(r21)
lwz r4,_LINK(r21)
mtcrf 0xff,r3
mtlr r4
REST_GPR(1, r21)
REST_4GPRS(3, r21)
/* we haven't used ctr or xer */
mtspr SRR1,r23
mtspr SRR0,r22
REST_GPR(20, r21)
REST_2GPRS(22, r21)
lwz r21,GPR21(r21)
/* we haven't used ctr or xer or lr */
/* fall through to fast_exception_return */
.globl fast_exception_return
fast_exception_return:
andi. r10,r9,MSR_RI /* check for recoverable interrupt */
beq 1f /* if not, we've got problems */
2: REST_4GPRS(3, r11)
lwz r10,_CCR(r11)
REST_GPR(1, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
REST_GPR(10, r11)
mtspr SRR1,r9
mtspr SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
lwz r11,GPR11(r11)
SYNC
RFI
/* check if the exception happened in a restartable section */
1: lis r3,exc_exit_restart_end@ha
addi r3,r3,exc_exit_restart_end@l
cmplw r12,r3
bge 3f
lis r4,exc_exit_restart@ha
addi r4,r4,exc_exit_restart@l
cmplw r12,r4
blt 3f
lis r3,fee_restarts@ha
tophys(r3,r3)
lwz r5,fee_restarts@l(r3)
addi r5,r5,1
stw r5,fee_restarts@l(r3)
mr r12,r4 /* restart at exc_exit_restart */
b 2b
.comm fee_restarts,4
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3: li r10,0
stw r10,TRAP(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
li r10,MSR_KERNEL
bl transfer_to_handler_full
.long nonrecoverable_exception
.long ret_from_except
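For context, load_up_fpu above implements lazy FPU switching (load_up_altivec later in the file follows the same pattern): the FPU is left disabled across _switch, and the first FP instruction in the new task traps here, where the previous owner's registers are spilled and the current task's are loaded. A rough C sketch of the uniprocessor policy, with hypothetical sketch_* helpers; saved_msr stands for the MSR image in the task's exception frame:

#define SKETCH_MSR_FP  0x2000u              /* placeholder for MSR_FP */

struct sketch_task {
        double fpr[32];                     /* per-task FP save area in the thread_struct */
        unsigned long saved_msr;            /* MSR restored when the task resumes */
};

static struct sketch_task *sketch_last_task_used_math;

extern void sketch_save_fp_regs(double *fpr);
extern void sketch_load_fp_regs(const double *fpr);

static void sketch_load_up_fpu(struct sketch_task *current_task)
{
        struct sketch_task *prev = sketch_last_task_used_math;

        if (prev) {
                sketch_save_fp_regs(prev->fpr);        /* give up the FPU state */
                prev->saved_msr &= ~SKETCH_MSR_FP;     /* its next FP use will trap again */
        }
        sketch_load_fp_regs(current_task->fpr);        /* bring in this task's state */
        current_task->saved_msr |= SKETCH_MSR_FP;      /* FP stays enabled after the RFI */
        sketch_last_task_used_math = current_task;
}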
/*
* FP unavailable trap from kernel - print a message, but let
* the task use FP in the kernel until it returns to user mode.
@@ -818,7 +858,7 @@ KernelFP:
bl printk
b ret_from_except
86: .string "floating point used in kernel (task=%p, pc=%x)\n"
.align 4
.align 4,0
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
@@ -842,57 +882,40 @@ load_up_altivec:
* to another. Instead we call giveup_altivec in switch_to.
*/
#ifndef CONFIG_SMP
#ifndef CONFIG_APUS
lis r6,-KERNELBASE@h
#else
lis r6,CYBERBASEp@h
lwz r6,0(r6)
#endif
tophys(r6,0)
addis r3,r6,last_task_used_altivec@ha
lwz r4,last_task_used_altivec@l(r3)
cmpi 0,r4,0
beq 1f
add r4,r4,r6
addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
SAVE_32VR(0,r20,r4)
SAVE_32VR(0,r10,r4)
MFVSCR(vr0)
li r20,THREAD_VSCR
STVX(vr0,r20,r4)
li r10,THREAD_VSCR
STVX(vr0,r10,r4)
lwz r5,PT_REGS(r4)
add r5,r5,r6
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r20,MSR_VEC@h
andc r4,r4,r20 /* disable altivec for previous task */
lis r10,MSR_VEC@h
andc r4,r4,r10 /* disable altivec for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of AltiVec after return */
oris r23,r23,MSR_VEC@h
oris r9,r9,MSR_VEC@h
mfspr r5,SPRG3 /* current task's THREAD (phys) */
li r20,THREAD_VSCR
LVX(vr0,r20,r5)
li r10,THREAD_VSCR
LVX(vr0,r10,r5)
MTVSCR(vr0)
REST_32VR(0,r20,r5)
REST_32VR(0,r10,r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
sub r4,r4,r6
stw r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
lwz r3,_CCR(r21)
lwz r4,_LINK(r21)
mtcrf 0xff,r3
mtlr r4
REST_GPR(1, r21)
REST_4GPRS(3, r21)
/* we haven't used ctr or xer */
mtspr SRR1,r23
mtspr SRR0,r22
REST_GPR(20, r21)
REST_2GPRS(22, r21)
lwz r21,GPR21(r21)
SYNC
RFI
/* we haven't used ctr or xer or lr */
b fast_exception_return
/*
* AltiVec unavailable trap from kernel - print a message, but let
......@@ -909,7 +932,7 @@ KernelAltiVec:
bl printk
b ret_from_except
87: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
.align 4
.align 4,0
/*
* giveup_altivec(tsk)
@@ -1180,7 +1203,7 @@ __secondary_start:
CLR_TOP32(r4)
mtspr SPRG3,r4
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
mtspr SPRG2,r3 /* 0 => not in RTAS */
/* enable MMU and jump to start_secondary */
li r4,MSR_KERNEL
@@ -1231,6 +1254,18 @@ _GLOBAL(__setup_cpu_7450)
bl setup_7450_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_7450_23)
mflr r4
bl setup_common_caches
bl setup_7450_23_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_7455)
mflr r4
bl setup_common_caches
bl setup_7455_hid0
mtlr r4
blr
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(__setup_cpu_generic)
@@ -1298,21 +1333,53 @@ setup_750_7400_hid0:
*/
setup_7450_hid0:
/* We check for the presence of an L3 cache setup by
* the firmware. If any, we disable DOZE capability
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
*/
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
li r7,CPU_FTR_CAN_DOZE
li r7,CPU_FTR_CAN_NAP
lwz r6,CPU_SPEC_FEATURES(r5)
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
setup_7450_23_hid0:
mfspr r11,HID0
/* All of the bits we have to set.....
*/
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
/* All of the bits we have to clear....
*/
li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
sync
isync
blr
/* 7450
* Enable Store Gathering (SGE), Branch Folding (FOLD)
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Ensure our data cache instructions really operate.
* Timebase has to be running or we wouldn't have made it here,
* just ensure we don't disable it.
* Clear Instruction cache throttling (ICTC)
*/
setup_7455_hid0:
mfspr r11,HID0
/* All of the bits we have to set.....
*/
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
/* All of the bits we have to clear....
@@ -1384,7 +1451,7 @@ start_here:
CLR_TOP32(r4)
mtspr SPRG3,r4
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
mtspr SPRG2,r3 /* 0 => not in RTAS */
/* stack */
lis r1,init_thread_union@ha
@@ -1481,6 +1548,10 @@ _GLOBAL(set_context)
stw r4, 0x4(r5)
#endif
li r4,0
BEGIN_FTR_SECTION
dssall 0
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3:
#ifdef CONFIG_PPC64BRIDGE
slbie r4
@@ -1490,7 +1561,7 @@ _GLOBAL(set_context)
rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
SYNC_601
sync
isync
blr
@@ -1503,35 +1574,35 @@ _GLOBAL(set_context)
* -- Cort
*/
clear_bats:
li r20,0
li r10,0
mfspr r9,PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi r9, 1
beq 1f
mtspr DBAT0U,r20
mtspr DBAT0L,r20
mtspr DBAT1U,r20
mtspr DBAT1L,r20
mtspr DBAT2U,r20
mtspr DBAT2L,r20
mtspr DBAT3U,r20
mtspr DBAT3L,r20
mtspr DBAT0U,r10
mtspr DBAT0L,r10
mtspr DBAT1U,r10
mtspr DBAT1L,r10
mtspr DBAT2U,r10
mtspr DBAT2L,r10
mtspr DBAT3U,r10
mtspr DBAT3L,r10
1:
mtspr IBAT0U,r20
mtspr IBAT0L,r20
mtspr IBAT1U,r20
mtspr IBAT1L,r20
mtspr IBAT2U,r20
mtspr IBAT2L,r20
mtspr IBAT3U,r20
mtspr IBAT3L,r20
mtspr IBAT0U,r10
mtspr IBAT0L,r10
mtspr IBAT1U,r10
mtspr IBAT1L,r10
mtspr IBAT2U,r10
mtspr IBAT2L,r10
mtspr IBAT3U,r10
mtspr IBAT3L,r10
blr
flush_tlbs:
lis r20, 0x40
1: addic. r20, r20, -0x1000
tlbie r20
lis r10, 0x40
1: addic. r10, r10, -0x1000
tlbie r10
blt 1b
sync
blr
......
@@ -42,11 +42,6 @@
#include <asm/ppc_asm.h>
#include "asm-offsets.h"
/* Preprocessor Defines */
#define STND_EXC 0
#define CRIT_EXC 1
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
* optional, information:
@@ -70,7 +65,6 @@ _GLOBAL(_start)
mr r29,r5
mr r28,r6
mr r27,r7
li r24,0 /* CPU number */
/* We have to turn on the MMU right away so we get cache modes
* set correctly.
@@ -89,197 +83,231 @@ turn_on_mmu:
SYNC
rfi /* enables MMU */
/* Exception vector entry code. This code runs with address translation
/*
* This area is used for temporarily saving registers during the
* critical exception prolog.
*/
. = 0xc0
crit_save:
.space 8
/*
* Exception vector entry code. This code runs with address translation
* turned off (i.e. using physical addresses). We assume SPRG3 has the
* physical address of the current task thread_struct.
* Note that we have to have decremented r1 before we write to any fields
* of the exception frame, since a critical interrupt could occur at any
* time, and it will write to the area immediately below the current r1.
*/
#define NORMAL_EXCEPTION_PROLOG \
mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
mtspr SPRN_SPRG1,r11; \
mtspr SPRN_SPRG2,r1; \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
beq 1f; \
mfspr r1,SPRG3; /* if from user, start at top of */\
lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
addi r1,r1,THREAD_SIZE; \
1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
tophys(r11,r1); \
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mfspr r10,SPRG0; \
stw r10,GPR10(r11); \
mfspr r12,SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r10,SPRG2; \
mfspr r12,SRR0; \
stw r10,GPR1(r11); \
mfspr r9,SRR1; \
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
#define COMMON_PROLOG(n) \
0: mtspr SPRN_SPRG0,r20; /* We need r20, move it to SPRG0 */\
mtspr SPRN_SPRG1,r21; /* We need r21, move it to SPRG1 */\
mfcr r20; /* We need the CR, move it to r20 */\
mfspr r21,SPRN_SPRG2; /* Exception stack to use */\
cmpwi cr0,r21,0; /* From user mode or RTAS? */\
bne 1f; /* Not RTAS, branch */\
tophys(r21, r1); /* Convert vka in r1 to pka in r21 */\
subi r21,r21,INT_FRAME_SIZE; /* Allocate an exception frame */\
1: stw r20,_CCR(r21); /* Save CR on the stack */\
stw r22,GPR22(r21); /* Save r22 on the stack */\
stw r23,GPR23(r21); /* r23 Save on the stack */\
mfspr r20,SPRN_SPRG0; /* Get r20 back out of SPRG0 */\
stw r20,GPR20(r21); /* Save r20 on the stack */\
mfspr r22,SPRN_SPRG1; /* Get r21 back out of SPRG1 */\
stw r22,GPR21(r21); /* Save r21 on the stack */\
mflr r20; \
stw r20,_LINK(r21); /* Save LR on the stack */\
mfctr r22; \
stw r22,_CTR(r21); /* Save CTR on the stack */\
mfspr r20,XER; \
stw r20,_XER(r21); /* Save XER on the stack */\
mfspr r20,SPRN_DBCR0; \
stw r20,_DBCR0(r21); /* Save Debug Control on the stack */
#define COMMON_EPILOG \
stw r0,GPR0(r21); /* Save r0 on the stack */\
stw r1,GPR1(r21); /* Save r1 on the stack */\
stw r2,GPR2(r21); /* Save r2 on the stack */\
stw r1,0(r21); \
tovirt(r1,r21); /* Set-up new kernel stack pointer */\
SAVE_4GPRS(3, r21); /* Save r3 through r6 on the stack */\
SAVE_GPR(7, r21); /* Save r7 on the stack */
#define STND_EXCEPTION_PROLOG(n) \
COMMON_PROLOG(n); \
mfspr r22,SPRN_SRR0; /* Faulting instruction address */\
lis r20,MSR_WE@h; \
mfspr r23,SPRN_SRR1; /* MSR at the time of fault */\
andc r23,r23,r20; /* disable processor wait state */\
COMMON_EPILOG;
#define CRIT_EXCEPTION_PROLOG(n) \
COMMON_PROLOG(n); \
mfspr r22,SPRN_SRR2; /* Faulting instruction address */\
lis r20,MSR_WE@h; \
mfspr r23,SPRN_SRR3; /* MSR at the time of fault */\
andc r23,r23,r20; /* disable processor wait state */\
COMMON_EPILOG;
/*
* Exception prolog for critical exceptions. This is a little different
* from the normal exception prolog above since a critical exception
* can potentially occur at any point during normal exception processing.
* Thus we cannot use the same SPRG registers as the normal prolog above.
* Instead we use a couple of words of memory at low physical addresses.
* This is OK since we don't support SMP on these processors.
*/
#define CRITICAL_EXCEPTION_PROLOG \
stw r10,crit_save@l(0); /* save two registers to work with */\
stw r11,4+crit_save@l(0); \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
mr r11,r1; \
beq 1f; \
mfspr r11,SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
tophys(r11,r11); \
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
lwz r10,crit_save@l(0); \
stw r10,GPR10(r11); \
lwz r12,4+crit_save@l(0); \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r12,SRR0; /* save SRR0 and SRR1 in the frame */\
stw r12,_SRR0(r11); /* since they may have had stuff */\
mfspr r9,SRR1; /* in them at the point where the */\
stw r9,_SRR1(r11); /* exception was taken */\
mfspr r12,SRR2; \
stw r1,GPR1(r11); \
mfspr r9,SRR3; \
stw r1,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
/*
* Exception vectors.
*/
#define START_EXCEPTION(n, label) \
. = n; \
label:
#define FINISH_EXCEPTION(func) \
bl transfer_to_handler_full; \
.long func; \
.long ret_from_except_full
#define FINISH_EXCEPTION(func) \
bl transfer_to_handler; \
.long func; \
.long ret_from_except
#define EXCEPTION(n, label, hdlr, xfer) \
START_EXCEPTION(n, label); \
NORMAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
xfer(n, hdlr)
#define STND_EXCEPTION(n, label, func) \
START_EXCEPTION(n, label); \
STND_EXCEPTION_PROLOG(n); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r7,STND_EXC; \
li r20,MSR_KERNEL; \
FINISH_EXCEPTION(func)
#define CRITICAL_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(n, label); \
CRITICAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_TEMPLATE(hdlr, trap, copyee, tfer, ret) \
li r10,trap; \
stw r10,TRAP(r11); \
li r10,MSR_KERNEL; \
copyee(r10, r9); \
bl tfer; \
.long hdlr; \
.long ret
#define CRIT_EXCEPTION(n, label, func) \
START_EXCEPTION(n, label); \
CRIT_EXCEPTION_PROLOG(n); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
li r7,CRIT_EXC; \
li r20,MSR_KERNEL; \
FINISH_EXCEPTION(func)
#define COPY_EE(d, s) rlwimi d,s,0,16,16
#define NOCOPY(d, s)
#define EXC_XFER_STD(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n, NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
/* Exception vectors.
*/
#define EXC_XFER_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n+1, NOCOPY, transfer_to_handler, \
ret_from_except)
/* 0x0100 - Critical Interrupt Exception
*/
CRIT_EXCEPTION(0x0100, CriticalInterrupt, UnknownException)
#define EXC_XFER_EE(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n, COPY_EE, transfer_to_handler_full, \
ret_from_except_full)
/* 0x0200 - Machine Check Exception
*/
#if 0
CRIT_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#else
START_EXCEPTION(0x0200, MachineCheck)
CRIT_EXCEPTION_PROLOG(0x0200)
#define EXC_XFER_EE_LITE(n, hdlr) \
EXC_XFER_TEMPLATE(hdlr, n+1, COPY_EE, transfer_to_handler, \
ret_from_except)
/*
lis r4,0x0400
mtdcr DCRN_POB0_BESR0,r4
*/
#ifdef DCRN_POB0_BEAR
mfdcr r4,DCRN_POB0_BEAR
mfdcr r4,DCRN_POB0_BESR0
mfdcr r4,DCRN_POB0_BESR1
#endif
#ifdef DCRN_PLB0_BEAR
mfdcr r4,DCRN_PLB0_ACR
mfdcr r4,DCRN_PLB0_BEAR
mfdcr r4,DCRN_PLB0_BESR
#endif
/*
* 0x0100 - Critical Interrupt Exception
*/
CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException)
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,CRIT_EXC
li r20,MSR_KERNEL
FINISH_EXCEPTION(MachineCheckException)
#endif
/*
* 0x0200 - Machine Check Exception
*/
CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
/* 0x0300 - Data Storage Exception
/*
* 0x0300 - Data Storage Exception
* This happens for just a few reasons. U0 set (but we don't do that),
* or zone protection fault (user violation, write to protected page).
* If this is just an update of modified status, we do that quickly
* and exit. Otherwise, we call heavyweight functions to do the work.
*/
START_EXCEPTION(0x0300, DataStore)
mtspr SPRG0, r20 /* Save some working registers */
mtspr SPRG1, r21
START_EXCEPTION(0x0300, DataStorage)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
#ifdef CONFIG_403GCX
stw r22, 0(r0)
stw r23, 4(r0)
mfcr r21
mfspr r22, SPRN_PID
stw r21, 8(r0)
stw r22, 12(r0)
stw r12, 0(r0)
stw r9, 4(r0)
mfcr r11
mfspr r12, SPRN_PID
stw r11, 8(r0)
stw r12, 12(r0)
#else
mtspr SPRG4, r22
mtspr SPRG5, r23
mfcr r21
mfspr r22, SPRN_PID
mtspr SPRG7, r21
mtspr SPRG6, r22
mtspr SPRG4, r12
mtspr SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
mtspr SPRG7, r11
mtspr SPRG6, r12
#endif
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_DST as well. */
mfspr r20, SPRN_ESR
andis. r20, r20, ESR_DIZ@h
mfspr r10, SPRN_ESR
andis. r10, r10, ESR_DIZ@h
bne 2f
mfspr r20, SPRN_DEAR /* Get faulting address */
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
andis. r21, r20, 0x8000
andis. r11, r10, 0x8000
beq 3f
lis r21, swapper_pg_dir@h
ori r21, r21, swapper_pg_dir@l
li r23, 0
mtspr SPRN_PID, r23 /* TLB will have 0 TID */
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
li r9, 0
mtspr SPRN_PID, r9 /* TLB will have 0 TID */
b 4f
/* Get the PGD for the current thread.
*/
3:
mfspr r21,SPRG3
lwz r21,PGDIR(r21)
mfspr r11,SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r21, r21)
rlwimi r21, r20, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r21, 0(r21) /* Get L1 entry */
rlwinm. r22, r21, 0, 0, 19 /* Extract L2 (pte) base address */
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
tophys(r22, r22)
rlwimi r22, r20, 22, 20, 29 /* Compute PTE address */
lwz r21, 0(r22) /* Get Linux PTE */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r23, r21, _PAGE_RW /* Is it writeable? */
andi. r9, r11, _PAGE_RW /* Is it writeable? */
beq 2f /* Bail if not */
/* Update 'changed'.
*/
ori r21, r21, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
stw r21, 0(r22) /* Update Linux page table */
ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
stw r11, 0(r12) /* Update Linux page table */
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
@@ -289,34 +317,34 @@ label:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
li r22, 0x0ce2
andc r21, r21, r22 /* Make sure 20, 21 are zero */
li r12, 0x0ce2
andc r11, r11, r12 /* Make sure 20, 21 are zero */
/* find the TLB index that caused the fault. It has to be here.
*/
tlbsx r23, 0, r20
tlbsx r9, 0, r10
tlbwe r21, r23, TLB_DATA /* Load TLB LO */
tlbwe r11, r9, TLB_DATA /* Load TLB LO */
/* Done...restore registers and get out of here.
*/
#ifdef CONFIG_403GCX
lwz r22, 12(r0)
lwz r21, 8(r0)
mtspr SPRN_PID, r22
mtcr r21
lwz r23, 4(r0)
lwz r22, 0(r0)
lwz r12, 12(r0)
lwz r11, 8(r0)
mtspr SPRN_PID, r12
mtcr r11
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r22, SPRG6
mfspr r21, SPRG7
mtspr SPRN_PID, r22
mtcr r21
mfspr r23, SPRG5
mfspr r22, SPRG4
mfspr r12, SPRG6
mfspr r11, SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
#endif
mfspr r21, SPRG1
mfspr r20, SPRG0
mfspr r11, SPRG1
mfspr r10, SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
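The fast path above (and the DTLBMiss handler at 0x1100 below) is a software page-table walk; in C it is roughly the following, with simplified types, placeholder PTE bits and hypothetical sketch_* helpers standing in for the tlbsx/tlbwe step and the do_page_fault fallback:

#define SKETCH_PAGE_RW     0x0400u   /* placeholder for _PAGE_RW */
#define SKETCH_PAGE_DIRTY  0x0100u   /* placeholder for _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE */

extern unsigned long sketch_swapper_pg_dir[];          /* kernel page directory */
extern unsigned long *sketch_current_pgdir(void);      /* PGDIR from the thread_struct */
extern void sketch_tlb_write(unsigned long ea, unsigned long pte);  /* tlbsx + tlbwe */
extern int  sketch_heavyweight_fault(unsigned long ea);             /* hand off to do_page_fault */

static int sketch_dtlb_store_fault(unsigned long ea)
{
        unsigned long *pgdir, l1, *ptep, pte;

        pgdir = (ea & 0x80000000u) ? sketch_swapper_pg_dir  /* kernel address, PID 0 */
                                   : sketch_current_pgdir();
        l1 = pgdir[ea >> 22];                      /* one L1 entry per 4 MB */
        if (!(l1 & ~0xfffu))
                return sketch_heavyweight_fault(ea);        /* no L2 table */
        ptep = (unsigned long *)(l1 & ~0xfffu) + ((ea >> 12) & 0x3ffu);
        pte = *ptep;
        if (!(pte & SKETCH_PAGE_RW))
                return sketch_heavyweight_fault(ea);        /* write to a read-only page */
        pte |= SKETCH_PAGE_DIRTY;                  /* record the write in the Linux PTE */
        *ptep = pte;
        sketch_tlb_write(ea, pte);                 /* refill the hardware TLB entry */
        return 0;
}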
@@ -325,120 +353,70 @@ label:
* and call the heavyweights to help us out.
*/
#ifdef CONFIG_403GCX
lwz r22, 12(r0)
lwz r21, 8(r0)
mtspr SPRN_PID, r22
mtcr r21
lwz r23, 4(r0)
lwz r22, 0(r0)
lwz r12, 12(r0)
lwz r11, 8(r0)
mtspr SPRN_PID, r12
mtcr r11
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r22, SPRG6
mfspr r21, SPRG7
mtspr SPRN_PID, r22
mtcr r21
mfspr r23, SPRG5
mfspr r22, SPRG4
mfspr r12, SPRG6
mfspr r11, SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
#endif
mfspr r21, SPRG1
mfspr r20, SPRG0
mfspr r11, SPRG1
mfspr r10, SPRG0
b DataAccess
/* 0x0400 - Instruction Storage Exception
* I don't know why it is called "Storage"....This is caused by a fetch
* from non-execute or guarded pages.
/*
* 0x0400 - Instruction Storage Exception
* This is caused by a fetch from non-execute or guarded pages.
*/
START_EXCEPTION(0x0400, InstructionAccess)
STND_EXCEPTION_PROLOG(0x0400)
mr r4,r22 /* Pass SRR0 as arg2 */
NORMAL_EXCEPTION_PROLOG
mr r4,r12 /* Pass SRR0 as arg2 */
li r5,0 /* Pass zero as arg3 */
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,STND_EXC
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* Copy EE bit from the saved MSR */
FINISH_EXCEPTION(do_page_fault) /* do_page_fault(regs, SRR0, SRR1) */
EXC_XFER_EE_LITE(0x400, do_page_fault)
/* 0x0500 - External Interrupt Exception
*/
START_EXCEPTION(0x0500, HardwareInterrupt)
STND_EXCEPTION_PROLOG(0x0500)
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,STND_EXC
li r20,MSR_KERNEL
li r4,0
bl transfer_to_handler
_GLOBAL(do_IRQ_intercept)
.long do_IRQ
.long ret_from_intercept
/* 0x0600 - Alignment Exception
*/
/* 0x0500 - External Interrupt Exception */
EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
/* 0x0600 - Alignment Exception */
START_EXCEPTION(0x0600, Alignment)
STND_EXCEPTION_PROLOG(0x0600)
NORMAL_EXCEPTION_PROLOG
mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
stw r4,_DEAR(r21)
stw r4,_DEAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,STND_EXC
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* Copy EE bit from the saved MSR */
FINISH_EXCEPTION(AlignmentException)
EXC_XFER_EE(0x600, AlignmentException)
/* 0x0700 - Program Exception
*/
START_EXCEPTION(0x0700, ProgramCheck)
STND_EXCEPTION_PROLOG(0x0700)
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,STND_EXC
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* Copy EE bit from the saved MSR */
FINISH_EXCEPTION(ProgramCheckException)
/* 0x0700 - Program Exception */
EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_EE)
EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE)
EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE)
EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE)
EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE)
/* I'm stealing this unused vector location to build a standard exception
* frame for Data TLB Access errors. The other Data TLB exceptions will bail
* out to this point if they can't resolve the lightweight TLB fault.
*/
START_EXCEPTION(0x0800, DataAccess)
STND_EXCEPTION_PROLOG(0x0800)
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r21)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
stw r4,_DEAR(r21)
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,STND_EXC
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* Copy EE bit from the saved MSR */
FINISH_EXCEPTION(do_page_fault) /* do_page_fault(regs, ESR, DEAR) */
STND_EXCEPTION(0x0900, Trap_09, UnknownException)
STND_EXCEPTION(0x0A00, Trap_0A, UnknownException)
STND_EXCEPTION(0x0B00, Trap_0B, UnknownException)
/* 0x0C00 - System Call Exception
*/
/* 0x0C00 - System Call Exception */
START_EXCEPTION(0x0C00, SystemCall)
STND_EXCEPTION_PROLOG(0x0C00)
stw r3,ORIG_GPR3(r21)
li r7,STND_EXC
li r20,MSR_KERNEL
rlwimi r20,r23,0,16,16 /* Copy EE bit from the saved MSR */
FINISH_EXCEPTION(DoSyscall)
STND_EXCEPTION(0x0D00, Trap_0D, UnknownException)
STND_EXCEPTION(0x0E00, Trap_0E, UnknownException)
STND_EXCEPTION(0x0F00, Trap_0F, UnknownException)
/* 0x1000 - Programmable Interval Timer (PIT) Exception
*/
START_EXCEPTION(0x1000, Decrementer)
STND_EXCEPTION_PROLOG(0x1000)
lis r0,TSR_PIS@h /* Set-up the PIT exception mask */
NORMAL_EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0xc00, DoSyscall)
EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE)
EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE)
EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE)
/* 0x1000 - Programmable Interval Timer (PIT) Exception */
START_EXCEPTION(0x1000, Decrementer)
NORMAL_EXCEPTION_PROLOG
lis r0,TSR_PIS@h
mtspr SPRN_TSR,r0 /* Clear the PIT exception */
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,STND_EXC
li r20,MSR_KERNEL
bl transfer_to_handler
_GLOBAL(timer_interrupt_intercept)
.long timer_interrupt
.long ret_from_intercept
EXC_XFER_EE(0x1000, timer_interrupt)
#if 0
/* NOTE:
......@@ -452,7 +430,7 @@ _GLOBAL(timer_interrupt_intercept)
/* 0x1020 - Watchdog Timer (WDT) Exception
*/
CRIT_EXCEPTION(0x1020, WDTException, UnknownException)
CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException)
#endif
/* 0x1100 - Data TLB Miss Exception
......@@ -461,56 +439,55 @@ _GLOBAL(timer_interrupt_intercept)
* load TLB entries from the page table if they exist.
*/
START_EXCEPTION(0x1100, DTLBMiss)
mtspr SPRG0, r20 /* Save some working registers */
mtspr SPRG1, r21
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
#ifdef CONFIG_403GCX
stw r22, 0(r0)
stw r23, 4(r0)
mfcr r21
mfspr r22, SPRN_PID
stw r21, 8(r0)
stw r22, 12(r0)
stw r12, 0(r0)
stw r9, 4(r0)
mfcr r11
mfspr r12, SPRN_PID
stw r11, 8(r0)
stw r12, 12(r0)
#else
mtspr SPRG4, r22
mtspr SPRG5, r23
mfcr r21
mfspr r22, SPRN_PID
mtspr SPRG7, r21
mtspr SPRG6, r22
mtspr SPRG4, r12
mtspr SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
mtspr SPRG7, r11
mtspr SPRG6, r12
#endif
mfspr r20, SPRN_DEAR /* Get faulting address */
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
andis. r21, r20, 0x8000
andis. r11, r10, 0x8000
beq 3f
lis r21, swapper_pg_dir@h
ori r21, r21, swapper_pg_dir@l
li r23, 0
mtspr SPRN_PID, r23 /* TLB will have 0 TID */
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
li r9, 0
mtspr SPRN_PID, r9 /* TLB will have 0 TID */
b 4f
/* Get the PGD for the current thread.
*/
3:
mfspr r21,SPRG3
lwz r21,PGDIR(r21)
mfspr r11,SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r21, r21)
rlwimi r21, r20, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r21, 0(r21) /* Get L1 entry */
rlwinm. r22, r21, 0, 0, 19 /* Extract L2 (pte) base address */
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
tophys(r22, r22)
rlwimi r22, r20, 22, 20, 29 /* Compute PTE address */
lwz r21, 0(r22) /* Get Linux PTE */
andi. r23, r21, _PAGE_PRESENT
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r9, r11, _PAGE_PRESENT
beq 2f
ori r21, r21, _PAGE_ACCESSED
stw r21, 0(r22)
ori r11, r11, _PAGE_ACCESSED
stw r11, 0(r12)
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
......@@ -520,8 +497,8 @@ _GLOBAL(timer_interrupt_intercept)
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
li r22, 0x0ce2
andc r21, r21, r22 /* Make sure 20, 21 are zero */
li r12, 0x0ce2
andc r11, r11, r12 /* Make sure 20, 21 are zero */
b finish_tlb_load
......@@ -531,22 +508,22 @@ _GLOBAL(timer_interrupt_intercept)
* and call the heavyweights to help us out.
*/
#ifdef CONFIG_403GCX
lwz r22, 12(r0)
lwz r21, 8(r0)
mtspr SPRN_PID, r22
mtcr r21
lwz r23, 4(r0)
lwz r22, 0(r0)
lwz r12, 12(r0)
lwz r11, 8(r0)
mtspr SPRN_PID, r12
mtcr r11
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r22, SPRG6
mfspr r21, SPRG7
mtspr SPRN_PID, r22
mtcr r21
mfspr r23, SPRG5
mfspr r22, SPRG4
mfspr r12, SPRG6
mfspr r11, SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
#endif
mfspr r21, SPRG1
mfspr r20, SPRG0
mfspr r11, SPRG1
mfspr r10, SPRG0
b DataAccess
/* 0x1200 - Instruction TLB Miss Exception
......@@ -554,56 +531,55 @@ _GLOBAL(timer_interrupt_intercept)
* registers and bailout to a different point.
*/
START_EXCEPTION(0x1200, ITLBMiss)
mtspr SPRG0, r20 /* Save some working registers */
mtspr SPRG1, r21
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
#ifdef CONFIG_403GCX
stw r22, 0(r0)
stw r23, 4(r0)
mfcr r21
mfspr r22, SPRN_PID
stw r21, 8(r0)
stw r22, 12(r0)
stw r12, 0(r0)
stw r9, 4(r0)
mfcr r11
mfspr r12, SPRN_PID
stw r11, 8(r0)
stw r12, 12(r0)
#else
mtspr SPRG4, r22
mtspr SPRG5, r23
mfcr r21
mfspr r22, SPRN_PID
mtspr SPRG7, r21
mtspr SPRG6, r22
mtspr SPRG4, r12
mtspr SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
mtspr SPRG7, r11
mtspr SPRG6, r12
#endif
mfspr r20, SRR0 /* Get faulting address */
mfspr r10, SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
andis. r21, r20, 0x8000
andis. r11, r10, 0x8000
beq 3f
lis r21, swapper_pg_dir@h
ori r21, r21, swapper_pg_dir@l
li r23, 0
mtspr SPRN_PID, r23 /* TLB will have 0 TID */
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
li r9, 0
mtspr SPRN_PID, r9 /* TLB will have 0 TID */
b 4f
/* Get the PGD for the current thread.
*/
3:
mfspr r21,SPRG3
lwz r21,PGDIR(r21)
mfspr r11,SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r21, r21)
rlwimi r21, r20, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r21, 0(r21) /* Get L1 entry */
rlwinm. r22, r21, 0, 0, 19 /* Extract L2 (pte) base address */
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
tophys(r22, r22)
rlwimi r22, r20, 22, 20, 29 /* Compute PTE address */
lwz r21, 0(r22) /* Get Linux PTE */
andi. r23, r21, _PAGE_PRESENT
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r9, r11, _PAGE_PRESENT
beq 2f
ori r21, r21, _PAGE_ACCESSED
stw r21, 0(r22)
ori r11, r11, _PAGE_ACCESSED
stw r11, 0(r12)
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
......@@ -613,30 +589,30 @@ _GLOBAL(timer_interrupt_intercept)
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
li r22, 0x0ce2
andc r21, r21, r22 /* Make sure 20, 21 are zero */
li r12, 0x0ce2
andc r11, r11, r12 /* Make sure 20, 21 are zero */
b finish_tlb_load
/* Done...restore registers and get out of here.
*/
#ifdef CONFIG_403GCX
lwz r22, 12(r0)
lwz r21, 8(r0)
mtspr SPRN_PID, r22
mtcr r21
lwz r23, 4(r0)
lwz r22, 0(r0)
lwz r12, 12(r0)
lwz r11, 8(r0)
mtspr SPRN_PID, r12
mtcr r11
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r22, SPRG6
mfspr r21, SPRG7
mtspr SPRN_PID, r22
mtcr r21
mfspr r23, SPRG5
mfspr r22, SPRG4
mfspr r12, SPRG6
mfspr r11, SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
#endif
mfspr r21, SPRG1
mfspr r20, SPRG0
mfspr r11, SPRG1
mfspr r10, SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
......@@ -645,60 +621,62 @@ _GLOBAL(timer_interrupt_intercept)
* and call the heavyweights to help us out.
*/
#ifdef CONFIG_403GCX
lwz r22, 12(r0)
lwz r21, 8(r0)
mtspr SPRN_PID, r22
mtcr r21
lwz r23, 4(r0)
lwz r22, 0(r0)
lwz r12, 12(r0)
lwz r11, 8(r0)
mtspr SPRN_PID, r12
mtcr r11
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r22, SPRG6
mfspr r21, SPRG7
mtspr SPRN_PID, r22
mtcr r21
mfspr r23, SPRG5
mfspr r22, SPRG4
mfspr r12, SPRG6
mfspr r11, SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
#endif
mfspr r21, SPRG1
mfspr r20, SPRG0
mfspr r11, SPRG1
mfspr r10, SPRG0
b InstructionAccess
STND_EXCEPTION(0x1300, Trap_13, UnknownException)
STND_EXCEPTION(0x1400, Trap_14, UnknownException)
STND_EXCEPTION(0x1500, Trap_15, UnknownException)
STND_EXCEPTION(0x1600, Trap_16, UnknownException)
EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
#ifdef CONFIG_IBM405_ERR51
/* 405GP errata 51 */
START_EXCEPTION(0x1700, Trap_17)
b DTLBMiss
#else
STND_EXCEPTION(0x1700, Trap_17, UnknownException)
EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
#endif
STND_EXCEPTION(0x1800, Trap_18, UnknownException)
STND_EXCEPTION(0x1900, Trap_19, UnknownException)
STND_EXCEPTION(0x1A00, Trap_1A, UnknownException)
STND_EXCEPTION(0x1B00, Trap_1B, UnknownException)
STND_EXCEPTION(0x1C00, Trap_1C, UnknownException)
STND_EXCEPTION(0x1D00, Trap_1D, UnknownException)
STND_EXCEPTION(0x1E00, Trap_1E, UnknownException)
STND_EXCEPTION(0x1F00, Trap_1F, UnknownException)
EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE)
EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE)
/* 0x2000 - Debug Exception
*/
START_EXCEPTION(0x2000, DebugTrap)
b check_single_step_in_exception
ret_to_debug_exception:
CRIT_EXCEPTION_PROLOG(0x2000)
addi r3,r1,STACK_FRAME_OVERHEAD
li r7,CRIT_EXC;
li r20,MSR_KERNEL
FINISH_EXCEPTION(DebugException)
CRITICAL_EXCEPTION(0x2000, DebugTrap, DebugException)
/* Make sure the final interrupt handler has not spilled past the
* end of its allotted space.
/*
* The other Data TLB exceptions bail out to this point
* if they can't resolve the lightweight TLB fault.
*/
.=0x2100
DataAccess:
NORMAL_EXCEPTION_PROLOG
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
stw r4,_DEAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x300, do_page_fault)
#if 0
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
* where an instruction that we are trying to single step causes
......@@ -718,37 +696,38 @@ check_single_step_in_exception:
* handler and must be the first instruction of every exception
* handler.
*/
mtspr SPRN_SPRG0,r20 /* Save some working registers... */
mtspr SPRN_SPRG1,r21
mfcr r20 /* ..and the cr because we change it */
mtspr SPRN_SPRG0,r10 /* Save some working registers... */
mtspr SPRN_SPRG1,r11
mfcr r10 /* ..and the cr because we change it */
mfspr r21,SPRN_SRR3 /* MSR at the time of fault */
andi. r21,r21,MSR_PR
mfspr r11,SPRN_SRR3 /* MSR at the time of fault */
andi. r11,r11,MSR_PR
bne+ 2f /* trapped from problem state */
mfspr r21,SPRN_SRR2 /* Faulting instruction address */
cmplwi r21,0x2100
mfspr r11,SPRN_SRR2 /* Faulting instruction address */
cmplwi r11,0x2100
bgt+ 2f /* address above exception vectors */
lis r21,DBSR_IC@h /* Remove the trap status */
mtspr SPRN_DBSR,r21
lis r11,DBSR_IC@h /* Remove the trap status */
mtspr SPRN_DBSR,r11
mfspr r21,SPRN_SRR3
rlwinm r21,r21,0,23,21 /* clear MSR_DE */
mtspr SPRN_SRR3, r21 /* restore MSR at rcfi without DE */
mfspr r11,SPRN_SRR3
rlwinm r11,r11,0,23,21 /* clear MSR_DE */
mtspr SPRN_SRR3, r11 /* restore MSR at rcfi without DE */
mtcrf 0xff,r20 /* restore registers */
mfspr r21,SPRN_SPRG1
mfspr r20,SPRN_SPRG0
mtcrf 0xff,r10 /* restore registers */
mfspr r11,SPRN_SPRG1
mfspr r10,SPRN_SPRG0
sync
rfci /* return to the exception handler */
2:
mtcrf 0xff,r20 /* restore registers */
mfspr r21,SPRN_SPRG1
mfspr r20,SPRN_SPRG0
mtcrf 0xff,r10 /* restore registers */
mfspr r11,SPRN_SPRG1
mfspr r10,SPRN_SPRG0
b ret_to_debug_exception
#endif 0
/* Other PowerPC processors, namely those derived from the 6xx-series
* have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
......@@ -759,9 +738,9 @@ check_single_step_in_exception:
/* Damn, I came up one instruction too many to fit into the
* exception space :-). Both the instruction and data TLB
* miss get to this point to load the TLB.
* r20 - EA of fault
* r21 - TLB LO (info from Linux PTE)
* r22, r23 - available to use
* r10 - EA of fault
* r11 - TLB LO (info from Linux PTE)
* r12, r9 - available to use
* PID - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
* Actually, it will fit now, but oh well.....a common place
......@@ -773,67 +752,59 @@ finish_tlb_load:
* instruction pages by copying data, we have to check if the
* EPN is already in the TLB.
*/
tlbsx. r23, 0, r20
tlbsx. r9, 0, r10
beq 6f
/* load the next available TLB index.
*/
lis r22, tlb_4xx_index@h
ori r22, r22, tlb_4xx_index@l
tophys(r22, r22)
lwz r23, 0(r22)
addi r23, r23, 1
lis r12, tlb_4xx_index@h
ori r12, r12, tlb_4xx_index@l
tophys(r12, r12)
lwz r9, 0(r12)
addi r9, r9, 1
#ifdef CONFIG_PIN_TLB
cmpwi 0, r23, 61 /* reserve entries 62, 63 for kernel */
cmpwi 0, r9, 61 /* reserve entries 62, 63 for kernel */
ble 7f
li r23, 0
li r9, 0
7:
#else
andi. r23, r23, (PPC4XX_TLB_SIZE-1)
andi. r9, r9, (PPC4XX_TLB_SIZE-1)
#endif
stw r23, 0(r22)
stw r9, 0(r12)
6:
tlbwe r21, r23, TLB_DATA /* Load TLB LO */
tlbwe r11, r9, TLB_DATA /* Load TLB LO */
/* Create EPN. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0, and ensure
* bits 20 and 21 are zero.
*/
li r22, 0x00c0
rlwimi r20, r22, 0, 20, 31
tlbwe r20, r23, TLB_TAG /* Load TLB HI */
li r12, 0x00c0
rlwimi r10, r12, 0, 20, 31
tlbwe r10, r9, TLB_TAG /* Load TLB HI */
/* Done...restore registers and get out of here.
*/
#ifdef CONFIG_403GCX
lwz r22, 12(r0)
lwz r21, 8(r0)
mtspr SPRN_PID, r22
mtcr r21
lwz r23, 4(r0)
lwz r22, 0(r0)
lwz r12, 12(r0)
lwz r11, 8(r0)
mtspr SPRN_PID, r12
mtcr r11
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r22, SPRG6
mfspr r21, SPRG7
mtspr SPRN_PID, r22
mtcr r21
mfspr r23, SPRG5
mfspr r22, SPRG4
mfspr r12, SPRG6
mfspr r11, SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
#endif
mfspr r21, SPRG1
mfspr r20, SPRG0
mfspr r11, SPRG1
mfspr r10, SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
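For orientation, a rough C outline of what the DTLBMiss/ITLBMiss fast path and finish_tlb_load above do (illustration only, not part of this patch: current_pgdir() and write_tlb_entry() are assumed stand-ins for the SPRG3/PGDIR load and for the tlbsx/tlbwe index handling, which is omitted here):

/* Illustration only: C outline of the software TLB reload coded in
 * assembly above.  current_pgdir() and write_tlb_entry() are assumed
 * stand-ins, not real kernel functions. */
static int soft_tlb_reload(unsigned long ea)
{
	unsigned long *pgd, pmd, *ptep, pte;

	if (ea & 0x80000000)		/* kernel address: swapper_pg_dir, PID 0 */
		pgd = (unsigned long *)swapper_pg_dir;
	else				/* user address: current task's page directory */
		pgd = current_pgdir();

	pmd = pgd[ea >> 22];		/* L1 lookup, top 10 address bits */
	if ((pmd & ~0xfffUL) == 0)
		return 0;		/* no pte page: bail to DataAccess/do_page_fault */

	ptep = (unsigned long *)(pmd & ~0xfffUL) + ((ea >> 12) & 0x3ff);
	pte = *ptep;			/* L2 lookup, next 10 address bits */
	if (!(pte & _PAGE_PRESENT))
		return 0;		/* not present: take the heavyweight fault path */

	*ptep = pte | _PAGE_ACCESSED;	/* mark the page referenced */

	pte &= ~0x0ce2UL;		/* clear bits 20-21 and the software-only bits */
	write_tlb_entry(pte,		/* TLB LO: protection/attributes from the PTE */
			(ea & ~0xfffUL) | 0x00c0);	/* TLB HI: EPN plus size/valid bits */
	return 1;
}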
/* extern void giveup_altivec(struct task_struct *prev)
*
* The PowerPC 4xx family of processors do not have AltiVec capabilities, so
* this just returns.
*/
_GLOBAL(giveup_altivec)
blr
/* extern void giveup_fpu(struct task_struct *prev)
*
* The PowerPC 4xx family of processors do not have an FPU, so this just
......@@ -842,16 +813,6 @@ _GLOBAL(giveup_altivec)
_GLOBAL(giveup_fpu)
blr
/* extern void abort(void)
*
* At present, this routine just applies a system reset.
*/
_GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR_RST(DBCR_RST_SYSTEM)@h
mtspr SPRN_DBCR0,r13
/* This is where the main kernel code starts.
*/
start_here:
......@@ -864,8 +825,6 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRG3,r4
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
/* stack */
lis r1,init_thread_union@ha
......@@ -982,6 +941,10 @@ initial_mmu:
blr
_GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR_RST(DBCR_RST_SYSTEM)@h
mtspr SPRN_DBCR0,r13
_GLOBAL(set_context)
......
......@@ -1017,20 +1017,28 @@ _GLOBAL(cvt_df)
* kernel_thread(fn, arg, flags)
*/
_GLOBAL(kernel_thread)
mr r6,r3 /* function */
stwu r1,-16(r1)
stw r30,8(r1)
stw r31,12(r1)
mr r30,r3 /* function */
mr r31,r4 /* argument */
ori r3,r5,CLONE_VM /* flags */
li r0,__NR_clone
sc
cmpi 0,r3,0 /* parent or child? */
bnelr /* return if parent */
bne 1f /* return if parent */
li r0,0 /* make top-level stack frame */
stwu r0,-16(r1)
mtlr r6 /* fn addr in lr */
mr r3,r4 /* load arg and call fn */
mtlr r30 /* fn addr in lr */
mr r3,r31 /* load arg and call fn */
blrl
li r0,__NR_exit /* exit after child exits */
li r0,__NR_exit /* exit if function returns */
li r3,0
sc
1: lwz r30,8(r1)
lwz r31,12(r1)
addi r1,r1,16
blr
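The change above makes kernel_thread() preserve r30/r31 across the clone and exit with status 0 if the function returns; a rough C equivalent (illustration only, clone_call() stands in for the "li r0,__NR_clone; sc" sequence):

/* Illustration only: what the kernel_thread() assembly above amounts to. */
static int kernel_thread_sketch(int (*fn)(void *), void *arg, unsigned long flags)
{
	long pid = clone_call(flags | CLONE_VM);	/* child shares the kernel address space */

	if (pid != 0)
		return pid;		/* parent (or error): return the result of clone */

	fn(arg);			/* child: run the function... */
	_exit(0);			/* ...and exit if it ever returns */
}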
/*
* This routine is just here to keep GCC happy - sigh...
......@@ -1050,19 +1058,15 @@ _GLOBAL(name) \
#define __NR__exit __NR_exit
SYSCALL(sync)
SYSCALL(setsid)
SYSCALL(open)
SYSCALL(read)
SYSCALL(write)
SYSCALL(lseek)
SYSCALL(close)
SYSCALL(dup)
SYSCALL(execve)
SYSCALL(open)
SYSCALL(close)
SYSCALL(waitpid)
SYSCALL(fork)
SYSCALL(delete_module)
SYSCALL(_exit)
SYSCALL(lseek)
SYSCALL(read)
/* Why isn't this a) automatic, b) written in 'C'? */
.data
......@@ -1070,7 +1074,7 @@ SYSCALL(read)
_GLOBAL(sys_call_table)
.long sys_ni_syscall /* 0 - old "setup()" system call */
.long sys_exit
.long sys_fork
.long ppc_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
......@@ -1140,7 +1144,7 @@ _GLOBAL(sys_call_table)
.long sys_ssetmask
.long sys_setreuid /* 70 */
.long sys_setregid
.long sys_sigsuspend
.long ppc_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
......@@ -1188,7 +1192,7 @@ _GLOBAL(sys_call_table)
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long ppc_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_modify_ldt
......@@ -1246,7 +1250,7 @@ _GLOBAL(sys_call_table)
.long sys_rt_sigpending /* 175 */
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long ppc_rt_sigsuspend
.long sys_pread
.long sys_pwrite /* 180 */
.long sys_chown
......@@ -1257,7 +1261,7 @@ _GLOBAL(sys_call_table)
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long sys_vfork
.long ppc_vfork
.long sys_getrlimit /* 190 */
.long sys_readahead
.long sys_mmap2
......
......@@ -107,10 +107,13 @@ main(void)
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
/* The PowerPC 400-class processors have neither the DAR nor the DSISR
* SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs
* for such processors.
* for such processors. For critical interrupts we use them to
* hold SRR0 and SRR1.
*/
DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
......
......@@ -75,7 +75,6 @@ int abs(int);
extern unsigned char __res[];
extern unsigned long ret_to_user_hook;
extern unsigned long mm_ptov (unsigned long paddr);
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
......@@ -313,9 +312,7 @@ EXPORT_SYMBOL(__save_flags_ptr_end);
EXPORT_SYMBOL(__restore_flags);
EXPORT_SYMBOL(__restore_flags_end);
#endif
EXPORT_SYMBOL(timer_interrupt_intercept);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(do_IRQ_intercept);
EXPORT_SYMBOL(irq_desc);
void ppc_irq_dispatch_handler(struct pt_regs *, int);
EXPORT_SYMBOL(ppc_irq_dispatch_handler);
......@@ -356,7 +353,6 @@ EXPORT_SYMBOL(cpm_free_handler);
EXPORT_SYMBOL(request_8xxirq);
#endif
EXPORT_SYMBOL(ret_to_user_hook);
EXPORT_SYMBOL(next_mmu_context);
EXPORT_SYMBOL(set_context);
EXPORT_SYMBOL(handle_mm_fault); /* For MOL */
......@@ -366,8 +362,6 @@ EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif
extern long *ret_from_intercept;
EXPORT_SYMBOL(ret_from_intercept);
EXPORT_SYMBOL(cur_cpu_spec);
#if defined(CONFIG_ALL_PPC)
extern unsigned long agp_special_page;
......
......@@ -251,16 +251,18 @@ void switch_to(struct task_struct *prev, struct task_struct *new)
void show_regs(struct pt_regs * regs)
{
int i;
int i, trap;
printk("NIP: %08lX XER: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
regs->nip, regs->xer, regs->link, regs->gpr[1], regs,regs->trap, print_tainted());
printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
print_tainted());
printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
regs->msr&MSR_IR ? 1 : 0,
regs->msr&MSR_DR ? 1 : 0);
if (regs->trap == 0x300 || regs->trap == 0x600)
trap = TRAP(regs);
if (trap == 0x300 || trap == 0x600)
printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
printk("TASK = %p[%d] '%s' ",
current, current->pid, current->comm);
......@@ -280,25 +282,18 @@ void show_regs(struct pt_regs * regs)
#ifdef CONFIG_SMP
printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */
printk("\n");
for (i = 0; i < 32; i++)
{
for (i = 0; i < 32; i++) {
long r;
if ((i % 8) == 0)
{
printk("GPR%02d: ", i);
}
if ( __get_user(r, &(regs->gpr[i])) )
goto out;
printk("\n" KERN_INFO "GPR%02d: ", i);
if (__get_user(r, &regs->gpr[i]))
break;
printk("%08lX ", r);
if ((i % 8) == 7)
{
printk("\n");
}
if (i == 12 && !FULL_REGS(regs))
break;
}
out:
printk("\n");
print_backtrace((unsigned long *)regs->gpr[1]);
}
......@@ -336,6 +331,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
unsigned long childframe;
CHECK_FULL_REGS(regs);
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
......@@ -441,18 +437,21 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(p1, regs->gpr[1], regs, 0);
}
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
}
int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
}
......
......@@ -218,14 +218,15 @@ int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || index > PT_FPSCR)
if ((addr & 3) || index > PT_FPSCR
|| child->thread.regs == NULL)
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
tmp = get_reg(child, (int) index);
} else {
if (child->thread.regs != NULL
&& child->thread.regs->msr & MSR_FP)
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
}
......@@ -243,23 +244,23 @@ int sys_ptrace(long request, long pid, long addr, long data)
break;
/* write the word at location addr in the USER area */
/* XXX this will need fixing for 64-bit */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || index > PT_FPSCR)
if ((addr & 3) || index > PT_FPSCR
|| child->thread.regs == NULL)
break;
CHECK_FULL_REGS(child->thread.regs);
if (index == PT_ORIG_R3)
break;
if (index < PT_FPR0) {
ret = put_reg(child, index, data);
} else {
if (child->thread.regs != NULL
&& child->thread.regs->msr & MSR_FP)
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
ret = 0;
......
......@@ -44,6 +44,8 @@
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#endif
extern void sigreturn_exit(struct pt_regs *);
#define GP_REGS_SIZE MIN(sizeof(elf_gregset_t), sizeof(struct pt_regs))
/*
......@@ -111,20 +113,14 @@ sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
regs->gpr[3] = -EINTR;
regs->result = -EINTR;
regs->ccr |= 0x10000000;
regs->gpr[3] = EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(&saveset, regs))
/*
* If a signal handler needs to be called,
* do_signal() has set R3 to the signal number (the
* first argument of the signal handler), so don't
* overwrite that with EINTR !
* In the other cases, do_signal() doesn't touch
* R3, so it's still set to -EINTR (see above).
*/
return regs->gpr[3];
sigreturn_exit(regs);
}
}
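Note the error-return convention used above: instead of putting -EINTR in r3, the code leaves regs->result negative for the restart logic, puts the positive errno in r3 and sets the summary-overflow bit of CR0, which is how the syscall exit path and the C library recognise a failed system call. A minimal sketch of the idea (illustration only, the helper name is made up):

/* Illustration only: flagging a syscall as failing with a given errno. */
static void set_syscall_error(struct pt_regs *regs, int err)
{
	regs->result = -err;		/* stays negative for the restart checks */
	regs->gpr[3] = err;		/* positive errno value returned in r3 */
	regs->ccr |= 0x10000000;	/* set CR0.SO so userland sees an error */
}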
......@@ -148,20 +144,22 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, int p3, int p4, int p6,
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
regs->gpr[3] = -EINTR;
regs->result = -EINTR;
regs->ccr |= 0x10000000;
regs->gpr[3] = EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(&saveset, regs))
return regs->gpr[3];
sigreturn_exit(regs);
}
}
int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
sys_sigaltstack(const stack_t *uss, stack_t *uoss, int r5, int r6,
int r7, int r8, struct pt_regs *regs)
{
struct pt_regs *regs = (struct pt_regs *) &uss;
return do_sigaltstack(uss, uoss, regs->gpr[1]);
}
......@@ -236,16 +234,15 @@ struct rt_sigframe
* Each of these things must be a multiple of 16 bytes in size.
*
*/
int sys_rt_sigreturn(struct pt_regs *regs)
int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
struct pt_regs *regs)
{
struct rt_sigframe *rt_sf;
struct sigcontext_struct sigctx;
struct sigregs *sr;
int ret;
elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */
sigset_t set;
stack_t st;
unsigned long prevsp;
rt_sf = (struct rt_sigframe *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))
......@@ -260,50 +257,26 @@ int sys_rt_sigreturn(struct pt_regs *regs)
if (regs->msr & MSR_FP)
giveup_fpu(current);
rt_sf++; /* Look at next rt_sigframe */
if (rt_sf == (struct rt_sigframe *)(sigctx.regs)) {
/* Last stacked signal - restore registers -
* sigctx is initialized to point to the
* preamble frame (where registers are stored)
* see handle_signal()
*/
sr = (struct sigregs *) sigctx.regs;
if (copy_from_user(saved_regs, &sr->gp_regs,
sizeof(sr->gp_regs)))
goto badframe;
saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
| (saved_regs[PT_MSR] & MSR_USERCHANGE);
memcpy(regs, saved_regs, GP_REGS_SIZE);
if (copy_from_user(current->thread.fpr, &sr->fp_regs,
sizeof(sr->fp_regs)))
goto badframe;
/* This function sets back the stack flags into
the current task structure. */
sys_sigaltstack(&st, NULL);
/* restore registers -
* sigctx is initialized to point to the
* preamble frame (where registers are stored)
* see handle_signal()
*/
sr = (struct sigregs *) sigctx.regs;
if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
goto badframe;
saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
| (saved_regs[PT_MSR] & MSR_USERCHANGE);
memcpy(regs, saved_regs, GP_REGS_SIZE);
if (copy_from_user(current->thread.fpr, &sr->fp_regs,
sizeof(sr->fp_regs)))
goto badframe;
/* This function sets back the stack flags into
the current task structure. */
sys_sigaltstack(&st, NULL, 0, 0, 0, 0, regs);
ret = regs->result;
} else {
/* More signals to go */
/* Set up registers for next signal handler */
regs->gpr[1] = (unsigned long)rt_sf - __SIGNAL_FRAMESIZE;
if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx)))
goto badframe;
sr = (struct sigregs *) sigctx.regs;
regs->gpr[3] = ret = sigctx.signal;
/* Get the siginfo */
get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo);
/* Get the ucontext */
get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc);
regs->gpr[6] = (unsigned long) rt_sf;
regs->link = (unsigned long) &sr->tramp;
regs->nip = sigctx.handler;
if (get_user(prevsp, &sr->gp_regs[PT_R1])
|| put_user(prevsp, (unsigned long *) regs->gpr[1]))
goto badframe;
current->thread.fpscr = 0;
}
return ret;
sigreturn_exit(regs); /* doesn't return here */
return 0;
badframe:
do_exit(SIGSEGV);
......@@ -318,6 +291,7 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
/* Set up preamble frame */
if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
goto badframe;
CHECK_FULL_REGS(regs);
if (regs->msr & MSR_FP)
giveup_fpu(current);
if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE)
......@@ -327,7 +301,7 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
It calls the sc exception at offset 0x9999
for sys_rt_sigreturn().
*/
|| __put_user(0x38006666UL, &frame->tramp[0]) /* li r0,0x6666 */
|| __put_user(0x38000000UL + __NR_rt_sigreturn, &frame->tramp[0])
|| __put_user(0x44000002UL, &frame->tramp[1])) /* sc */
goto badframe;
flush_icache_range((unsigned long) &frame->tramp[0],
......@@ -362,14 +336,13 @@ setup_rt_frame(struct pt_regs *regs, struct sigregs *frame,
/*
* Do a signal return; undo the signal stack.
*/
int sys_sigreturn(struct pt_regs *regs)
int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
struct pt_regs *regs)
{
struct sigcontext_struct *sc, sigctx;
struct sigregs *sr;
int ret;
elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */
sigset_t set;
unsigned long prevsp;
sc = (struct sigcontext_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
......@@ -387,40 +360,20 @@ int sys_sigreturn(struct pt_regs *regs)
if (regs->msr & MSR_FP )
giveup_fpu(current);
sc++; /* Look at next sigcontext */
if (sc == (struct sigcontext_struct *)(sigctx.regs)) {
/* Last stacked signal - restore registers */
sr = (struct sigregs *) sigctx.regs;
if (copy_from_user(saved_regs, &sr->gp_regs,
sizeof(sr->gp_regs)))
goto badframe;
saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
| (saved_regs[PT_MSR] & MSR_USERCHANGE);
memcpy(regs, saved_regs, GP_REGS_SIZE);
/* restore registers */
sr = (struct sigregs *) sigctx.regs;
if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs)))
goto badframe;
saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE)
| (saved_regs[PT_MSR] & MSR_USERCHANGE);
memcpy(regs, saved_regs, GP_REGS_SIZE);
if (copy_from_user(current->thread.fpr, &sr->fp_regs,
sizeof(sr->fp_regs)))
goto badframe;
if (copy_from_user(current->thread.fpr, &sr->fp_regs,
sizeof(sr->fp_regs)))
goto badframe;
ret = regs->result;
} else {
/* More signals to go */
regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
sr = (struct sigregs *) sigctx.regs;
regs->gpr[3] = ret = sigctx.signal;
regs->gpr[4] = (unsigned long) sc;
regs->link = (unsigned long) &sr->tramp;
regs->nip = sigctx.handler;
if (get_user(prevsp, &sr->gp_regs[PT_R1])
|| put_user(prevsp, (unsigned long *) regs->gpr[1]))
goto badframe;
current->thread.fpscr = 0;
}
return ret;
sigreturn_exit(regs); /* doesn't return here */
return 0;
badframe:
do_exit(SIGSEGV);
......@@ -437,12 +390,13 @@ setup_frame(struct pt_regs *regs, struct sigregs *frame,
if (verify_area(VERIFY_WRITE, frame, sizeof(*frame)))
goto badframe;
CHECK_FULL_REGS(regs);
if (regs->msr & MSR_FP)
giveup_fpu(current);
if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE)
|| __copy_to_user(&frame->fp_regs, current->thread.fpr,
ELF_NFPREG * sizeof(double))
|| __put_user(0x38007777UL, &frame->tramp[0]) /* li r0,0x7777 */
|| __put_user(0x38000000UL + __NR_sigreturn, &frame->tramp[0])
|| __put_user(0x44000002UL, &frame->tramp[1])) /* sc */
goto badframe;
flush_icache_range((unsigned long) &frame->tramp[0],
......@@ -479,11 +433,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
struct sigcontext_struct *sc;
struct rt_sigframe *rt_sf;
if (regs->trap == 0x0C00 /* System Call! */
if (TRAP(regs) == 0x0C00 /* System Call! */
&& ((int)regs->result == -ERESTARTNOHAND ||
((int)regs->result == -ERESTARTSYS &&
!(ka->sa.sa_flags & SA_RESTART))))
!(ka->sa.sa_flags & SA_RESTART)))) {
regs->result = -EINTR;
regs->gpr[3] = EINTR;
regs->ccr |= 0x10000000;
}
/* Set up Signal Frame */
if (ka->sa.sa_flags & SA_SIGINFO) {
......@@ -511,7 +468,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
|| __put_user(sig, &rt_sf->uc.uc_mcontext.signal))
goto badframe;
} else {
/* Put another sigcontext on the stack */
/* Put a sigcontext on the stack */
*newspp -= sizeof(*sc);
sc = (struct sigcontext_struct *) *newspp;
if (verify_area(VERIFY_WRITE, sc, sizeof(*sc)))
......@@ -665,7 +622,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
break;
}
if (regs->trap == 0x0C00 /* System Call! */ &&
if (TRAP(regs) == 0x0C00 /* System Call! */ &&
((int)regs->result == -ERESTARTNOHAND ||
(int)regs->result == -ERESTARTSYS ||
(int)regs->result == -ERESTARTNOINTR)) {
......
......@@ -172,6 +172,7 @@ MachineCheckException(struct pt_regs *regs)
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
regs->msr |= MSR_RI;
regs->nip = fixup;
return;
}
......@@ -223,7 +224,7 @@ SMIException(struct pt_regs *regs)
void
UnknownException(struct pt_regs *regs)
{
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx %s\n",
printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
regs->nip, regs->msr, regs->trap, print_tainted());
_exception(SIGTRAP, regs);
}
......@@ -266,6 +267,7 @@ emulate_instruction(struct pt_regs *regs)
if (!user_mode(regs))
return retval;
CHECK_FULL_REGS(regs);
if (get_user(instword, (uint *)(regs->nip)))
return -EFAULT;
......@@ -366,6 +368,14 @@ StackOverflow(struct pt_regs *regs)
panic("kernel stack overflow");
}
void nonrecoverable_exception(struct pt_regs *regs)
{
printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
regs->nip, regs->msr);
debugger(regs);
die("nonrecoverable exception", regs, SIGKILL);
}
void
trace_syscall(struct pt_regs *regs)
{
......@@ -382,6 +392,8 @@ SoftwareEmulation(struct pt_regs *regs)
extern int Soft_emulate_8xx(struct pt_regs *);
int errcode;
CHECK_FULL_REGS(regs);
if (!user_mode(regs)) {
debugger(regs);
die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
......@@ -423,7 +435,7 @@ void DebugException(struct pt_regs *regs)
} else if (debug_status & DBSR_IC) { /* instruction completion */
mtspr(SPRN_DBSR, DBSR_IC);
regs->dbcr0 &= ~DBCR0_IC;
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
if (!user_mode(regs) && debugger_sstep(regs))
return;
......@@ -436,7 +448,7 @@ void DebugException(struct pt_regs *regs)
void
TAUException(struct pt_regs *regs)
{
printk("TAU trap at PC: %lx, SR: %lx, vector=%lx %s\n",
printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_INT_TAU */
......
......@@ -37,6 +37,7 @@
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
extern void (*debugger)(struct pt_regs *);
......@@ -81,14 +82,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (regs->trap == 0x400)
if (TRAP(regs) == 0x400)
error_code &= 0x48200000;
else
is_write = error_code & 0x02000000;
#endif /* CONFIG_4xx */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_fault_handler && regs->trap == 0x300) {
if (debugger_fault_handler && TRAP(regs) == 0x300) {
debugger_fault_handler(regs);
return;
}
......@@ -140,7 +141,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
goto bad_area;
#if defined(CONFIG_4xx)
/* an exec - 4xx allows for per-page execute permission */
} else if (regs->trap == 0x400) {
} else if (TRAP(regs) == 0x400) {
pte_t *ptep;
#if 0
......@@ -159,8 +160,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
struct page *page = pte_page(*ptep);
if (! test_bit(PG_arch_1, &page->flags)) {
__flush_dcache_icache((unsigned long)kmap(page));
kunmap(page);
unsigned long phys = page_to_pfn(page) << PAGE_SHIFT;
__flush_dcache_icache_phys(phys);
set_bit(PG_arch_1, &page->flags);
}
pte_update(ptep, 0, _PAGE_HWEXEC);
......
......@@ -43,13 +43,13 @@
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
* _PAGE_RW (0x400) if a write.
* r23 contains the SRR1 value, from which we use the MSR_PR bit.
* r9 contains the SRR1 value, from which we use the MSR_PR bit.
* SPRG3 contains the physical address of the current task's thread.
*
* Returns to the caller if the access is illegal or there is no
* mapping for the address. Otherwise it places an appropriate PTE
* in the hash table and returns from the exception.
* Uses r0, r2 - r7, ctr, lr.
* Uses r0, r3 - r8, ctr, lr.
*/
.text
.globl hash_page
......@@ -62,34 +62,34 @@ hash_page:
#endif
tophys(r7,0) /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
addis r2,r7,mmu_hash_lock@h
ori r2,r2,mmu_hash_lock@l
addis r8,r7,mmu_hash_lock@h
ori r8,r8,mmu_hash_lock@l
lis r0,0x0fff
b 10f
11: lwz r6,0(r2)
11: lwz r6,0(r8)
cmpwi 0,r6,0
bne 11b
10: lwarx r6,0,r2
10: lwarx r6,0,r8
cmpwi 0,r6,0
bne- 11b
stwcx. r0,0,r2
stwcx. r0,0,r8
bne- 10b
isync
#endif
/* Get PTE (linux-style) and check access */
lis r0,KERNELBASE@h /* check if kernel address */
cmplw 0,r4,r0
mfspr r2,SPRG3 /* current task's THREAD (phys) */
mfspr r8,SPRG3 /* current task's THREAD (phys) */
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
lwz r5,PGDIR(r2) /* virt page-table root */
lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
lis r5,swapper_pg_dir@ha /* if kernel address, use */
addi r5,r5,swapper_pg_dir@l /* kernel page table */
rlwimi r3,r23,32-12,29,29 /* MSR_PR -> _PAGE_USER */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
112: add r5,r5,r7 /* convert to phys addr */
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r5) /* get pmd entry */
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
lwz r8,0(r5) /* get pmd entry */
rlwinm. r8,r8,0,0,19 /* extract address of pte page */
#ifdef CONFIG_SMP
beq- hash_page_out /* return if no mapping */
#else
......@@ -99,7 +99,7 @@ hash_page:
to the address following the rfi. */
beqlr-
#endif
rlwimi r2,r4,22,20,29 /* insert next 10 bits of address */
rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
......@@ -110,7 +110,7 @@ hash_page:
* to update the PTE to set _PAGE_HASHPTE. -- paulus.
*/
retry:
lwarx r6,0,r2 /* get linux-style pte */
lwarx r6,0,r8 /* get linux-style pte */
andc. r5,r3,r6 /* check access & ~permission */
#ifdef CONFIG_SMP
bne- hash_page_out /* return if access not permitted */
......@@ -118,13 +118,13 @@ retry:
bnelr-
#endif
or r5,r0,r6 /* set accessed/dirty bits */
stwcx. r5,0,r2 /* attempt to update PTE */
stwcx. r5,0,r8 /* attempt to update PTE */
bne- retry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
mr r2,r8 /* we have saved r2 but not r8 */
mfctr r0
stw r0,_CTR(r11)
bl create_hpte /* add the hash table entry */
mr r8,r2
/*
* htab_reloads counts the number of times we have to fault an
......@@ -134,48 +134,34 @@ retry:
* update_mmu_cache gets called to put the HPTE into the hash table
* and those are counted as preloads rather than reloads.
*/
addis r2,r7,htab_reloads@ha
lwz r3,htab_reloads@l(r2)
addis r8,r7,htab_reloads@ha
lwz r3,htab_reloads@l(r8)
addi r3,r3,1
stw r3,htab_reloads@l(r2)
stw r3,htab_reloads@l(r8)
#ifdef CONFIG_SMP
eieio
addis r2,r7,mmu_hash_lock@ha
addis r8,r7,mmu_hash_lock@ha
li r0,0
stw r0,mmu_hash_lock@l(r2)
stw r0,mmu_hash_lock@l(r8)
#endif
/* Return from the exception */
lwz r3,_CCR(r21)
lwz r4,_LINK(r21)
lwz r5,_CTR(r21)
mtcrf 0xff,r3
lwz r4,_LINK(r11)
lwz r5,_CTR(r11)
mtlr r4
mtctr r5
lwz r0,GPR0(r21)
lwz r1,GPR1(r21)
lwz r2,GPR2(r21)
lwz r3,GPR3(r21)
lwz r4,GPR4(r21)
lwz r5,GPR5(r21)
lwz r6,GPR6(r21)
lwz r7,GPR7(r21)
/* we haven't used xer */
mtspr SRR1,r23
mtspr SRR0,r22
lwz r20,GPR20(r21)
lwz r22,GPR22(r21)
lwz r23,GPR23(r21)
lwz r21,GPR21(r21)
RFI
lwz r0,GPR0(r11)
lwz r7,GPR7(r11)
lwz r8,GPR8(r11)
b fast_exception_return
#ifdef CONFIG_SMP
hash_page_out:
eieio
addis r2,r7,mmu_hash_lock@ha
addis r8,r7,mmu_hash_lock@ha
li r0,0
stw r0,mmu_hash_lock@l(r2)
stw r0,mmu_hash_lock@l(r8)
blr
#endif /* CONFIG_SMP */
......
......@@ -15,6 +15,7 @@
#include <asm/prom.h>
#include <asm/bitops.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
......@@ -105,6 +106,7 @@ static void cpu_cmd(void);
#endif /* CONFIG_SMP */
static int pretty_print_addr(unsigned long addr);
static void csum(void);
static void bootcmds(void);
extern int print_insn_big_powerpc(FILE *, unsigned long, unsigned);
extern void printf(const char *fmt, ...);
......@@ -482,10 +484,26 @@ cmds(struct pt_regs *excp)
cpu_cmd();
break;
#endif /* CONFIG_SMP */
case 'z':
bootcmds();
break;
}
}
}
static void bootcmds(void)
{
int cmd;
cmd = inchar();
if (cmd == 'r')
ppc_md.restart(NULL);
else if (cmd == 'h')
ppc_md.halt();
else if (cmd == 'p')
ppc_md.power_off();
}
#ifdef CONFIG_SMP
static void cpu_cmd(void)
{
......@@ -670,7 +688,7 @@ bpt_cmds(void)
printf("r");
if (dabr.address & 2)
printf("w");
if (dabr.address & 4)
if (!(dabr.address & 4))
printf("p");
printf("]\n");
}
......@@ -707,8 +725,7 @@ backtrace(struct pt_regs *excp)
unsigned sp;
unsigned stack[2];
struct pt_regs regs;
extern char ret_from_intercept, ret_from_syscall_1, ret_from_syscall_2;
extern char ret_from_except;
extern char ret_from_except, ret_from_except_full, ret_from_syscall;
printf("backtrace:\n");
......@@ -723,10 +740,9 @@ backtrace(struct pt_regs *excp)
break;
pretty_print_addr(stack[1]);
printf(" ");
if (stack[1] == (unsigned) &ret_from_intercept
|| stack[1] == (unsigned) &ret_from_except
|| stack[1] == (unsigned) &ret_from_syscall_1
|| stack[1] == (unsigned) &ret_from_syscall_2) {
if (stack[1] == (unsigned) &ret_from_except
|| stack[1] == (unsigned) &ret_from_except_full
|| stack[1] == (unsigned) &ret_from_syscall) {
if (mread(sp+16, &regs, sizeof(regs)) != sizeof(regs))
break;
printf("\nexception:%x [%x] %x ", regs.trap, sp+16,
......@@ -751,6 +767,8 @@ getsp()
void
excprint(struct pt_regs *fp)
{
int trap;
#ifdef CONFIG_SMP
printf("cpu %d: ", smp_processor_id());
#endif /* CONFIG_SMP */
......@@ -759,7 +777,8 @@ excprint(struct pt_regs *fp)
printf(", lr = ");
pretty_print_addr(fp->link);
printf("\nmsr = %x, sp = %x [%x]\n", fp->msr, fp->gpr[1], fp);
if (fp->trap == 0x300 || fp->trap == 0x600)
trap = TRAP(fp);
if (trap == 0x300 || trap == 0x600)
printf("dar = %x, dsisr = %x\n", fp->dar, fp->dsisr);
if (current)
printf("current = %x, pid = %d, comm = %s\n",
......@@ -774,9 +793,14 @@ prregs(struct pt_regs *fp)
if (scanhex(&base))
fp = (struct pt_regs *) base;
for (n = 0; n < 32; ++n)
for (n = 0; n < 32; ++n) {
printf("R%.2d = %.8x%s", n, fp->gpr[n],
(n & 3) == 3? "\n": " ");
if (n == 12 && !FULL_REGS(fp)) {
printf("\n");
break;
}
}
printf("pc = %.8x msr = %.8x lr = %.8x cr = %.8x\n",
fp->nip, fp->msr, fp->link, fp->ccr);
printf("ctr = %.8x xer = %.8x trap = %4x\n",
......@@ -1160,7 +1184,7 @@ static char *fault_chars[] = { "--", "**", "##" };
static void
handle_fault(struct pt_regs *regs)
{
fault_type = regs->trap == 0x200? 0: regs->trap == 0x300? 1: 2;
fault_type = TRAP(regs) == 0x200? 0: TRAP(regs) == 0x300? 1: 2;
longjmp(bus_error_jmp, 1);
}
......
......@@ -8,7 +8,6 @@
#include <linux/config.h>
#include <asm/smp.h>
/* entry.S is sensitive to the offsets of these fields */
/* The __last_jiffy_stamp field is needed to ensure that no decrementer
* interrupt is lost on SMP machines. Since on most CPUs it is in the same
* cache line as local_irq_count, it is cheap to access and is also used on UP
......@@ -40,8 +39,8 @@ typedef struct {
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
#define hardirq_enter(cpu) (local_irq_count(cpu)++)
#define hardirq_exit(cpu) (local_irq_count(cpu)--)
#define hardirq_enter(cpu) do { preempt_disable(); local_irq_count(cpu)++; } while (0)
#define hardirq_exit(cpu) do { local_irq_count(cpu)--; preempt_enable(); } while (0)
#define synchronize_irq() do { } while (0)
#define release_irqlock(cpu) do { } while (0)
......@@ -75,7 +74,8 @@ static inline void release_irqlock(int cpu)
static inline void hardirq_enter(int cpu)
{
unsigned int loops = 10000000;
preempt_disable();
++local_irq_count(cpu);
while (test_bit(0,&global_irq_lock)) {
if (cpu == global_irq_holder) {
......@@ -97,6 +97,7 @@ static inline void hardirq_enter(int cpu)
static inline void hardirq_exit(int cpu)
{
--local_irq_count(cpu);
preempt_enable();
}
static inline int hardirq_trylock(int cpu)
......
......@@ -31,6 +31,11 @@
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
SAVE_10GPRS(22, base)
#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
REST_10GPRS(22, base)
#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
......
......@@ -49,12 +49,12 @@
#define MSR_LE (1<<0) /* Little Endian */
#ifdef CONFIG_APUS_FAST_EXCEPT
#define MSR_ MSR_ME|MSR_IP|MSR_RI
#define MSR_ (MSR_ME|MSR_IP|MSR_RI)
#else
#define MSR_ MSR_ME|MSR_RI
#define MSR_ (MSR_ME|MSR_RI)
#endif
#define MSR_KERNEL MSR_|MSR_IR|MSR_DR
#define MSR_USER MSR_KERNEL|MSR_PR|MSR_EE
#define MSR_KERNEL (MSR_|MSR_IR|MSR_DR)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
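The added parentheses matter because | binds more loosely than &, so an expression such as MSR_KERNEL & ~MSR_IR would otherwise have the mask applied to only the last term of the expansion. A tiny standalone demonstration with made-up values (illustration only):

/* Illustration only: the operator-precedence bug fixed by parenthesizing the macros. */
#include <stdio.h>

#define A 1
#define B 2
#define C 4
#define UNPAREN A|B|C		/* old style, no parentheses */
#define PAREN   (A|B|C)		/* new style */

int main(void)
{
	/* prints "7 5": & binds tighter than |, so UNPAREN keeps bit B set */
	printf("%d %d\n", UNPAREN & ~B, PAREN & ~B);
	return 0;
}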
/* Floating Point Status and Control Register (FPSCR) Fields */
......
......@@ -33,14 +33,16 @@ struct pt_regs {
unsigned long mq; /* 601 only (not used at present) */
/* Used on APUS to hold IPL value. */
unsigned long trap; /* Reason for being here */
/* N.B. for critical exceptions on 4xx, the dar and dsisr
fields are overloaded to hold srr0 and srr1. */
unsigned long dar; /* Fault registers */
unsigned long dsisr;
unsigned long result; /* Result of a system call */
};
#endif
/* iSeries uses mq field for soft enable flag */
#define softEnable mq
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
......@@ -48,9 +50,28 @@ struct pt_regs {
/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 64
#ifndef __ASSEMBLY__
#define instruction_pointer(regs) ((regs)->nip)
#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
/*
* We use the least-significant bit of the trap field to indicate
* whether we have saved the full set of registers, or only a
* partial set. A 1 there means the partial set.
* On 4xx we use the next bit to indicate whether the exception
* is a critical exception (1 means it is).
*/
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define TRAP(regs) ((regs)->trap & ~0xF)
#define CHECK_FULL_REGS(regs) \
do { \
if ((regs)->trap & 1) \
printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
} while (0)
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
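A small usage sketch of the new trap-field macros (illustration only, the function is made up):

/* Illustration only: decoding regs->trap with the macros defined above. */
static void describe_regs(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);		/* complain if only the partial set was saved */
	printk("vector 0x%lx, %s register set\n",
	       TRAP(regs),		/* exception vector with the flag bits masked off */
	       FULL_REGS(regs) ? "full" : "partial");
}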
/*
......
/*
* BK Id: SCCS/s.smplock.h 1.10 10/23/01 08:09:35 trini
* BK Id: %F% %I% %G% %U% %#%
*/
/*
* <asm/smplock.h>
......@@ -15,26 +15,28 @@
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
#elif defined(CONFIG_PREEMPT)
#define kernel_locked() preempt_get_count()
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
......@@ -47,8 +49,14 @@ do { \
*/
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif /* CONFIG_PREEMPT */
}
static __inline__ void unlock_kernel(void)
......
/*
* BK Id: SCCS/s.softirq.h 1.13 07/12/01 20:02:34 paulus
* BK Id: %F% %I% %G% %U% %#%
*/
#ifdef __KERNEL__
#ifndef __ASM_SOFTIRQ_H
......@@ -10,6 +10,7 @@
#define local_bh_disable() \
do { \
preempt_disable(); \
local_bh_count(smp_processor_id())++; \
barrier(); \
} while (0)
......@@ -18,14 +19,17 @@ do { \
do { \
barrier(); \
local_bh_count(smp_processor_id())--; \
preempt_enable(); \
} while (0)
#define local_bh_enable() \
do { \
barrier(); \
if (!--local_bh_count(smp_processor_id()) \
&& softirq_pending(smp_processor_id())) { \
do_softirq(); \
} \
preempt_enable(); \
} while (0)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
......@@ -23,6 +23,8 @@ struct thread_info {
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* not used at present */
int softirq_count;
int hardirq_count;
};
/*
......@@ -67,6 +69,9 @@ static inline struct thread_info *current_thread_info(void)
#define TI_EXECDOMAIN 4
#define TI_FLAGS 8
#define TI_CPU 12
#define TI_PREEMPT 16
#define TI_SOFTIRQ 20
#define TI_HARDIRQ 24
#define PREEMPT_ACTIVE 0x4000000
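The TI_* constants exist so entry.S can address these fields by numeric offset; a compile-time sanity sketch (illustration only, assuming 4-byte pointers and ints as on ppc32):

/* Illustration only: the asm offsets above match the C layout on ppc32. */
#include <stddef.h>

#define CHECK_OFFSET(sym, field) \
	typedef char sym##_matches[offsetof(struct thread_info, field) == (sym) ? 1 : -1]

CHECK_OFFSET(TI_FLAGS,   flags);		/* 8 */
CHECK_OFFSET(TI_CPU,     cpu);			/* 12 */
CHECK_OFFSET(TI_PREEMPT, preempt_count);	/* 16 */
CHECK_OFFSET(TI_SOFTIRQ, softirq_count);	/* 20 */
CHECK_OFFSET(TI_HARDIRQ, hardirq_count);	/* 24 */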
......
......@@ -234,7 +234,6 @@
#define __NR(n) #n
#define __syscall_return(type) \
return (__sc_err & 0x10000000 ? errno = __sc_ret, __sc_ret = -1 : 0), \
(type) __sc_ret
......@@ -403,8 +402,9 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
__syscall_return (type); \
}
#ifdef __KERNEL__
#ifdef __KERNEL_SYSCALLS__
#define __NR__exit __NR_exit
/*
* Forking from kernel space will result in the child getting a new,
......@@ -414,29 +414,24 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
* the child.
*/
#ifdef __KERNEL_SYSCALLS__
/*
* System call prototypes.
*/
#define __NR__exit __NR_exit
static inline _syscall0(int,pause)
static inline _syscall0(int,sync)
static inline _syscall0(pid_t,setsid)
static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
static inline _syscall1(int,close,int,fd)
static inline _syscall1(int,_exit,int,exitcode)
static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
static inline _syscall1(int,delete_module,const char *,name)
extern pid_t setsid(void);
extern int write(int fd, const char *buf, off_t count);
extern int read(int fd, char *buf, off_t count);
extern off_t lseek(int fd, off_t offset, int count);
extern int dup(int fd);
extern int execve(const char *file, char **argv, char **envp);
extern int open(const char *file, int flag, int mode);
extern int close(int fd);
extern pid_t waitpid(pid_t pid, int *wait_stat, int options);
static inline pid_t wait(int * wait_stat)
{
return waitpid(-1, wait_stat, 0);
}
#endif /* __KERNEL_SYSCALLS__ */
/*
......@@ -447,4 +442,6 @@ static inline pid_t wait(int * wait_stat)
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
#endif /* __KERNEL__ */
#endif /* _ASM_PPC_UNISTD_H_ */