Commit 5f8a8bf5 authored by Paul Mackerras

PPC32: Better support for PPC 4xx debug facilities.

This provides for separate global and per-thread debug control
register value(s), which are switched as appropriate.  This allows
us to use an external JTAG debugger for debugging the kernel
as well as gdb for debugging user programs.
parent 870e5a00
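
In outline, the patch keeps two DBCR0 images and switches between them at the user/kernel boundary: the value programmed by an external JTAG debugger (or by the kernel itself) stays live except while a ptraced task is actually executing in user mode, at which point that task's private value from its thread_struct is installed instead. A compressed C statement of that selection policy; this is purely illustrative, the real switching is done by the exception entry/exit assembly in the diff below:

extern unsigned long global_dbcr0[2];   /* saved global value kept by the assembly below */

/* Which DBCR0 image should be live right now?  Illustrative sketch only. */
static unsigned long effective_dbcr0(struct task_struct *tsk, int in_user_mode)
{
        if (in_user_mode && (tsk->ptrace & PT_PTRACED))
                return tsk->thread.dbcr0;       /* gdb-controlled, per thread */
        return global_dbcr0[0];                 /* JTAG / kernel-wide setting */
}
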
@@ -36,6 +36,7 @@ main(void)
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
@@ -43,6 +44,10 @@ main(void)
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#ifdef CONFIG_4xx
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
DEFINE(PT_PTRACED, PT_PTRACED);
#endif
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
......
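
The new DEFINE() entries exist so that the assembler added below can reach task_struct.ptrace and thread_struct.dbcr0 by symbolic offset (for example lwz r0,PTRACE(r2) and lwz r0,THREAD+THREAD_DBCR0(r2)). As a stand-alone illustration of the offsetof trick this relies on (a generic sketch, not the kernel's actual offset generator):

#include <stddef.h>
#include <stdio.h>

struct thread_struct_sketch {           /* illustrative layout only */
        unsigned long ksp;
        unsigned long dbcr0;            /* the field the assembly wants to reach */
};

#define DEFINE(sym, val) printf("#define %s %lu\n", #sym, (unsigned long)(val))

int main(void)
{
        /* The emitted output is included by the assembly sources, which can
         * then address the field as THREAD_DBCR0(reg). */
        DEFINE(THREAD_DBCR0, offsetof(struct thread_struct_sketch, dbcr0));
        return 0;
}
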
@@ -34,6 +34,25 @@
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx since it includes MSR_CE.
*/
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x) li r,(x)
#endif
#ifdef CONFIG_4xx
.globl crit_transfer_to_handler
crit_transfer_to_handler:
lwz r0,crit_r10@l(0)
stw r0,GPR10(r11)
lwz r0,crit_r11@l(0)
stw r0,GPR11(r11)
/* fall through */
#endif
/*
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception, turning
@@ -51,6 +70,7 @@ transfer_to_handler:
stw r2,GPR2(r11)
stw r12,_NIP(r11)
stw r9,_MSR(r11)
andi. r2,r9,MSR_PR
mfctr r12
mfspr r2,XER
stw r12,_CTR(r11)
@@ -61,6 +81,22 @@ transfer_to_handler:
beq 2f /* if from user, fix up THREAD.regs */
addi r11,r1,STACK_FRAME_OVERHEAD
stw r11,PT_REGS(r12)
#ifdef CONFIG_4xx
lwz r12,PTRACE-THREAD(r12)
andi. r12,r12,PT_PTRACED
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
li r12,-1 /* clear all pending debug events */
mtspr SPRN_DBSR,r12
lis r11,global_dbcr0@ha
tophys(r11,r11)
addi r11,r11,global_dbcr0@l
lwz r12,0(r11)
mtspr SPRN_DBCR0,r12
lwz r12,4(r11)
addi r12,r12,-1
stw r12,4(r11)
#endif
b 3f
2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
@@ -96,15 +132,19 @@ transfer_to_handler_cont:
* and call StackOverflow(regs), which should not return.
*/
stack_ovf:
/* sometimes we use a statically-allocated stack, which is OK. */
lis r11,_end@h
ori r11,r11,_end@l
cmplw r1,r11
ble 3b /* r1 <= &_end is OK */
SAVE_NVGPRS(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
tovirt(r2,r2) /* set r2 to current */
lis r1,init_thread_union@ha
addi r1,r1,init_thread_union@l
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
lis r9,StackOverflow@ha
addi r9,r9,StackOverflow@l
li r10,MSR_KERNEL
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
FIX_SRR1(r10,r12)
mtspr SRR0,r9
mtspr SRR1,r10
@@ -161,7 +201,7 @@ ret_from_syscall:
stw r10,_CCR(r1)
/* disable interrupts so current_thread_info()->flags can't change */
30: li r10,MSR_KERNEL /* doesn't include MSR_EE */
30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10)
rlwinm r12,r1,0,0,18 /* current_thread_info() */
@@ -169,6 +209,12 @@ ret_from_syscall:
andi. r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne- syscall_exit_work
syscall_exit_cont:
#ifdef CONFIG_4xx
/* If the process has its own DBCR0 value, load it up */
lwz r0,PTRACE(r2)
andi. r0,r0,PT_PTRACED
bnel- load_4xx_dbcr0
#endif
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -215,7 +261,6 @@ syscall_exit_work:
stw r3,GPR3(r1) /* Update return value */
andi. r0,r9,_TIF_SYSCALL_TRACE
beq 5f
stw r6,GPR0(r1) /* temporary gross hack to make strace work */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
@@ -230,7 +275,7 @@ syscall_exit_work:
REST_NVGPRS(r1)
2:
lwz r3,GPR3(r1)
li r10,MSR_KERNEL /* doesn't include MSR_EE */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10) /* disable interrupts again */
rlwinm r12,r1,0,0,18 /* current_thread_info() */
@@ -243,10 +288,7 @@ syscall_exit_work:
beq syscall_exit_cont
andi. r0,r9,_TIF_SIGPENDING
beq syscall_exit_cont
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
b syscall_do_signal
b do_user_signal
1:
ori r10,r10,MSR_EE
SYNC
@@ -459,7 +501,7 @@ ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
li r10,MSR_KERNEL /* doesn't include EE */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
@@ -467,23 +509,33 @@ ret_from_except:
andi. r3,r3,MSR_PR
beq resume_kernel
user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
rlwinm r9,r1,0,0,18
lwz r9,TI_FLAGS(r9)
andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne do_work
restore_user:
#ifdef CONFIG_4xx
/* Check whether this process has its own DBCR0 value */
lwz r0,PTRACE(r2)
andi. r0,r0,PT_PTRACED
bnel- load_4xx_dbcr0
#endif
#ifdef CONFIG_PREEMPT
b restore
resume_kernel:
rlwinm r9,r1,0,0,18 /* check current_thread_info->preempt_count */
/* check current_thread_info->preempt_count */
rlwinm r9,r1,0,0,18
lwz r3,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
cmpwi 0,r3,0 /* if non-zero, just restore regs and return */
bne restore
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne do_work
bne do_resched
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
@@ -522,7 +574,7 @@ restore:
* can restart the exception exit path at the label
* exc_exit_restart below. -- paulus
*/
li r10,MSR_KERNEL & ~MSR_RI
LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
SYNC
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
@@ -542,18 +594,14 @@ exc_exit_restart_end:
#else /* CONFIG_4xx */
/*
* This is a bit different on 4xx because 4xx doesn't have
* the RI bit in the MSR, and because we have critical
* exceptions, for which we need to restore SRR0 and SRR1
* and then use SRR2/SRR3 to return from the exception.
* the RI bit in the MSR.
* The TLB miss handler checks if we have interrupted
* the exception exit path and restarts it if so.
* the exception exit path and restarts it if so
* (well maybe one day it will... :).
*/
lwz r10,TRAP(r1) /* check for critical exception */
lwz r11,_LINK(r1)
andi. r10,r10,2
mtlr r11
lwz r10,_CCR(r1)
bne crit_exc_exit
mtcrf 0xff,r10
REST_2GPRS(9, r1)
.globl exc_exit_restart
@@ -570,7 +618,43 @@ exc_exit_restart_end:
PPC405_ERR77_SYNC
rfi
b . /* prevent prefetch past rfi */
crit_exc_exit:
/*
* Returning from a critical interrupt in user mode doesn't need
* to be any different from a normal exception. For a critical
* interrupt in the kernel, we just return (without checking for
* preemption) since the interrupt may have happened at some crucial
* place (e.g. inside the TLB miss handler), and because we will be
* running with r1 pointing into critical_stack, not the current
* process's kernel stack (and therefore current_thread_info() will
* give the wrong answer).
* We have to restore various SPRs that may have been in use at the
* time of the critical interrupt.
*/
.globl ret_from_crit_exc
ret_from_crit_exc:
REST_NVGPRS(r1)
lwz r3,_MSR(r1)
andi. r3,r3,MSR_PR
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
bne user_exc_return
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
REST_2GPRS(7, r1)
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr XER,r10
mtctr r11
PPC405_ERR77(0,r1)
stwcx. r0,0,r1 /* to clear the reservation */
lwz r11,_LINK(r1)
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
/* avoid any possible TLB misses here by turning off MSR.DR, we
* assume the instructions here are mapped by a pinned TLB entry */
@@ -606,40 +690,68 @@ crit_exc_exit:
mtspr SRR1,r10
lwz r10,crit_pid@l(0)
mtspr SPRN_PID,r10
lwz r10,crit_r10@l(0)
lwz r11,crit_r11@l(0)
lwz r10,GPR10(r1)
lwz r11,GPR11(r1)
lwz r1,GPR1(r1)
PPC405_ERR77_SYNC
rfci
b . /* prevent prefetch past rfci */
/*
* Load the DBCR0 value for a task that is being ptraced,
* having first saved away the global DBCR0.
*/
load_4xx_dbcr0:
mfmsr r0 /* first disable debug exceptions */
rlwinm r0,r0,0,~MSR_DE
mtmsr r0
isync
mfspr r10,SPRN_DBCR0
lis r11,global_dbcr0@ha
addi r11,r11,global_dbcr0@l
lwz r0,THREAD+THREAD_DBCR0(r2)
stw r10,0(r11)
mtspr SPRN_DBCR0,r0
lwz r10,4(r11)
addi r10,r10,1
stw r10,4(r11)
li r11,-1
mtspr SPRN_DBSR,r11 /* clear all pending debug events */
blr
.comm global_dbcr0,8
#endif /* CONFIG_4xx */
do_work: /* r10 contains MSR_KERNEL here */
andi. r0,r9,_TIF_NEED_RESCHED
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
li r10,MSR_KERNEL
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
rlwinm r9,r1,0,0,18
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
#ifdef CONFIG_PREEMPT
lwz r0,_MSR(r1)
li r11,_TIF_NEED_RESCHED
/* move MSR_PR bit down to TIF_SIGPENDING (0x4) bit */
rlwimi r11,r0,18+TIF_SIGPENDING,31-TIF_SIGPENDING,31-TIF_SIGPENDING
and. r0,r9,r11
#else /* CONFIG_PREEMPT */
andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
#endif /* CONFIG_PREEMPT */
andi. r0,r0,MSR_PR
beq restore
do_work:
#endif
andi. r0,r9,_TIF_SIGPENDING
beq restore_user
do_user_signal: /* r10 contains MSR_KERNEL here */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
andi. r0,r9,_TIF_NEED_RESCHED
beq 1f
bl schedule
b recheck
1:
syscall_do_signal:
/* save r13-r31 in the exception frame, if not already done */
lwz r3,TRAP(r1)
andi. r0,r3,1
@@ -715,7 +827,7 @@ _GLOBAL(enter_rtas)
lwz r8,rtas_entry@l(r8)
mfmsr r9
stw r9,8(r1)
li r0,MSR_KERNEL
LOAD_MSR_KERNEL(r0,MSR_KERNEL)
SYNC /* disable interrupts so SRR0/1 */
MTMSRD(r0) /* don't get trashed */
li r9,MSR_
......
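
Putting the new 4xx pieces of this file together: global_dbcr0 is an 8-byte area whose first word holds the saved global DBCR0 and whose second word counts how many times the per-thread value has been swapped in. load_4xx_dbcr0 does the swap on the way out to a ptraced user task (briefly clearing MSR_DE while it does so); the CONFIG_4xx block in transfer_to_handler undoes it on the way back in, using tophys() because data translation is still off at that point. A pseudo-C rendering of the bookkeeping, with mtspr()/mfspr() standing for the kernel's usual SPR accessors and the function names invented for this sketch:

static unsigned long global_dbcr0[2];   /* [0] saved global DBCR0, [1] switch count */

/* load_4xx_dbcr0: on the way out to a ptraced user task */
static void switch_to_thread_dbcr0(struct task_struct *tsk)
{
        global_dbcr0[0] = mfspr(SPRN_DBCR0);    /* remember JTAG/kernel settings */
        mtspr(SPRN_DBCR0, tsk->thread.dbcr0);   /* install gdb's per-thread value */
        global_dbcr0[1]++;
        mtspr(SPRN_DBSR, -1);                   /* clear any pending debug events */
}

/* transfer_to_handler, CONFIG_4xx block: on exception entry from that task */
static void switch_to_global_dbcr0(void)
{
        mtspr(SPRN_DBSR, -1);                   /* clear pending debug events */
        mtspr(SPRN_DBCR0, global_dbcr0[0]);     /* put the global value back */
        global_dbcr0[1]--;
}
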
@@ -401,7 +401,6 @@ DataAccess:
mfspr r4,DAR
stw r4,_DAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
andi. r0,r9,MSR_PR /* set cr0.eq if from kernel */
EXC_XFER_EE_LITE(0x300, do_page_fault)
#ifdef CONFIG_PPC64BRIDGE
@@ -427,7 +426,6 @@ InstructionAccess:
1: addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r12
mr r5,r9
andi. r0,r9,MSR_PR /* set cr0.eq if from kernel */
EXC_XFER_EE_LITE(0x400, do_page_fault)
#ifdef CONFIG_PPC64BRIDGE
......
@@ -75,7 +75,8 @@ _GLOBAL(_start)
* ready to work.
*/
turn_on_mmu:
li r0,MSR_KERNEL
lis r0,MSR_KERNEL@h
ori r0,r0,MSR_KERNEL@l
mtspr SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
@@ -213,42 +214,21 @@ _GLOBAL(crit_srr1)
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
#if 0
#define CRITICAL_EXCEPTION_PROLOG \
stw r10,crit_save@l(0); /* save two registers to work with */\
stw r11,4+crit_save@l(0); \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
mr r11,r1; \
beq 1f; \
mfspr r11,SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
tophys(r11,r11); \
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
lwz r10,crit_save@l(0); \
stw r10,GPR10(r11); \
lwz r12,4+crit_save@l(0); \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r12,SRR0; /* save SRR0 and SRR1 in the frame */\
stw r12,_SRR0(r11); /* since they may have had stuff */\
mfspr r9,SRR1; /* in them at the point where the */\
stw r9,_SRR1(r11); /* exception was taken */\
mfspr r12,SRR2; \
stw r1,GPR1(r11); \
mfspr r9,SRR3; \
stw r1,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
#endif
/*
* State at this point:
* r9 saved in stack frame, now saved SRR3 & ~MSR_WE
* r10 saved in crit_r10 and in stack frame, trashed
* r11 saved in crit_r11 and in stack frame,
* now phys stack/exception frame pointer
* r12 saved in stack frame, now saved SRR2
* SPRG0,1,4,5,6,7 saved in crit_sprg0,1,4,5,6,7
* PID saved in crit_pid
* SRR0,1 saved in crit_srr0,1
* CR saved in stack frame, CR0.EQ = !SRR3.PR
* LR, DEAR, ESR in stack frame
* r1 saved in stack frame, now virt stack/excframe pointer
* r0, r3-r8 saved in stack frame
*/
/*
* Exception vectors.
@@ -257,12 +237,6 @@ _GLOBAL(crit_srr1)
. = n; \
label:
#define FINISH_EXCEPTION(func) \
bl transfer_to_handler_full; \
.long func; \
.long ret_from_except_full
#define EXCEPTION(n, label, hdlr, xfer) \
START_EXCEPTION(n, label); \
NORMAL_EXCEPTION_PROLOG; \
@@ -274,13 +248,14 @@ label:
CRITICAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, transfer_to_handler_full, \
ret_from_except_full)
NOCOPY, crit_transfer_to_handler, \
ret_from_crit_exc)
#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
li r10,trap; \
stw r10,TRAP(r11); \
li r10,msr; \
lis r10,msr@h; \
ori r10,r10,msr@l; \
copyee(r10, r9); \
bl tfer; \
.long hdlr; \
@@ -732,7 +707,47 @@ label:
/* 0x2000 - Debug Exception
*/
CRITICAL_EXCEPTION(0x2000, DebugTrap, DebugException)
START_EXCEPTION(0x2000, DebugTrap)
CRITICAL_EXCEPTION_PROLOG
/*
* If this is a single step or branch-taken exception in an
* exception entry sequence, it was probably meant to apply to
* the code where the exception occurred (since exception entry
* doesn't turn off DE automatically). We simulate the effect
* of turning off DE on entry to an exception handler by turning
* off DE in the SRR3 value and clearing the debug status.
*/
mfspr r10,SPRN_DBSR /* check single-step/branch taken */
andis. r10,r10,(DBSR_IC|DBSR_BT)@h
beq+ 1f
andi. r0,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
beq 2f /* branch if we need to fix it up... */
/* continue normal handling for a critical exception... */
1: mfspr r4,SPRN_DBSR
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_TEMPLATE(DebugException, 0x2002, \
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
/* here it looks like we got an inappropriate debug exception. */
2: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
mtspr SPRN_DBSR,r10 /* clear the IC/BT debug intr status */
/* restore state and get out */
lwz r10,_CCR(r11)
lwz r0,GPR0(r11)
lwz r1,GPR1(r11)
mtcrf 0x80,r10
mtspr SRR2,r12
mtspr SRR3,r9
lwz r9,GPR9(r11)
lwz r12,GPR12(r11)
lwz r10,crit_r10@l(0)
lwz r11,crit_r11@l(0)
PPC405_ERR77_SYNC
rfci
b .
/*
* The other Data TLB exceptions bail out to this point
@@ -747,60 +762,6 @@ DataAccess:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x300, do_page_fault)
#if 0
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
* where an instruction that we are trying to single step causes
* an exception (eg ITLB miss) and thus the first instruction of
* the exception handler generates a single step debug exception.
*
* If we get a debug trap on the first instruction of an exception handler,
* we reset the MSR_DE in the _exception handlers_ MSR (the debug trap is
* a critical exception, so we are using SPRN_SRR3 to manipulate the MSR).
* The exception handler was handling a non-critical interrupt, so it will
* save (and later restore) the MSR via SPRN_SRR1, which will still have
* the MSR_DE bit set.
*/
check_single_step_in_exception:
/* This first instruction was already executed by the exception
* handler and must be the first instruction of every exception
* handler.
*/
mtspr SPRN_SPRG0,r10 /* Save some working registers... */
mtspr SPRN_SPRG1,r11
mfcr r10 /* ..and the cr because we change it */
mfspr r11,SPRN_SRR3 /* MSR at the time of fault */
andi. r11,r11,MSR_PR
bne+ 2f /* trapped from problem state */
mfspr r11,SPRN_SRR2 /* Faulting instruction address */
cmplwi r11,0x2100
bgt+ 2f /* address above exception vectors */
lis r11,DBSR_IC@h /* Remove the trap status */
mtspr SPRN_DBSR,r11
mfspr r11,SPRN_SRR3
rlwinm r11,r11,0,23,21 /* clear MSR_DE */
mtspr SPRN_SRR3, r11 /* restore MSR at rcfi without DE */
mtcrf 0xff,r10 /* restore registers */
mfspr r11,SPRN_SPRG1
mfspr r10,SPRN_SPRG0
sync
rfci /* return to the exception handler */
b . /* prevent prefetch past rfi */
2:
mtcrf 0xff,r10 /* restore registers */
mfspr r11,SPRN_SPRG1
mfspr r10,SPRN_SPRG0
b ret_to_debug_exception
#endif 0
/* Other PowerPC processors, namely those derived from the 6xx-series
* have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
* However, for the 4xx-series processors these are neither defined nor
@@ -911,7 +872,8 @@ start_here:
lis r4,2f@h
ori r4,r4,2f@l
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
mtspr SRR0,r4
mtspr SRR1,r3
rfi
@@ -934,7 +896,8 @@ start_here:
stw r6, 0(r5)
/* Now turn on the MMU for real! */
li r4,MSR_KERNEL
lis r4,MSR_KERNEL@h
ori r4,r4,MSR_KERNEL@l
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
mtspr SRR0,r3
......
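
The new DebugTrap vector in this file handles a 4xx quirk: taking an exception does not clear MSR_DE, so a single-step or branch-taken event armed for user code can fire on the first instructions of some other exception's entry sequence. The fixup path detects that situation (IC/BT set in DBSR while the saved SRR3 has neither MSR_IR nor MSR_PR set, i.e. supervisor code running with instruction translation off) and cancels the event instead of reporting it. In pseudo-C, with the kernel's register constants assumed and the function name invented for this sketch:

/* Decision made by the DebugTrap prolog above. */
static int debug_event_hit_exception_entry(unsigned long dbsr, unsigned long srr3)
{
        return (dbsr & (DBSR_IC | DBSR_BT)) != 0 &&
               (srr3 & (MSR_IR | MSR_PR)) == 0;
}

/* If it did: clear MSR_DE in the SRR3 image, write the IC/BT bits back to
 * DBSR to acknowledge them, restore the scratch registers and rfci straight
 * back to the interrupted entry code.  Otherwise fall through to the normal
 * critical-exception path and call DebugException(). */
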
@@ -35,7 +35,11 @@
/*
* Set of msr bits that gdb can change on behalf of a process.
*/
#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
#ifdef CONFIG_4xx
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif
/*
* does not yet catch signals sent when the child dies.
@@ -132,8 +136,14 @@ set_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
if (regs != NULL) {
#ifdef CONFIG_4xx
task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
/* MSR.DE should already be set */
#else
regs->msr |= MSR_SE;
#endif
}
}
static inline void
@@ -141,8 +151,13 @@ clear_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
if (regs != NULL) {
#ifdef CONFIG_4xx
task->thread.dbcr0 = 0;
#else
regs->msr &= ~MSR_SE;
#endif
}
}
/*
......
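
On the tracer side nothing changes: set_single_step() now arms DBCR0_IDM|DBCR0_IC in the child's thread.dbcr0 instead of setting MSR_SE (and MSR_DEBUGCHANGE is 0 on 4xx, so gdb cannot flip MSR bits directly), but the resulting SIGTRAP stops look the same. A minimal user-space single-step loop that exercises this path; this is ordinary ptrace(2) usage, not something added by the patch:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Single-step an already-attached, stopped child n times. */
static void step_n(pid_t child, int n)
{
        int status;

        while (n-- > 0) {
                ptrace(PTRACE_SINGLESTEP, child, 0, 0);
                waitpid(child, &status, 0);
                if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP)
                        break;  /* child exited or stopped for another reason */
        }
}
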
@@ -313,8 +313,6 @@ ProgramCheckException(struct pt_regs *regs)
int isbpt = esr & ESR_PTR;
extern int do_mathemu(struct pt_regs *regs);
if (isbpt)
mtspr(SPRN_DBSR, DBSR_TIE);
#ifdef CONFIG_MATH_EMULATION
if (!isbpt && do_mathemu(regs) == 0)
return;
@@ -436,28 +434,20 @@ SoftwareEmulation(struct pt_regs *regs)
#if defined(CONFIG_4xx)
void DebugException(struct pt_regs *regs)
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
unsigned long debug_status;
debug_status = mfspr(SPRN_DBSR);
regs->msr &= ~MSR_DE; /* Turn off 'debug' bit */
#if 0
if (debug_status & DBSR_TIE) { /* trap instruction*/
mtspr(SPRN_DBSR, DBSR_TIE);
if (!user_mode(regs) && debugger_bpt(regs))
return;
_exception(SIGTRAP, regs);
} else if (debug_status & DBSR_IC) { /* instruction completion */
mtspr(SPRN_DBSR, DBSR_IC);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
}
#endif
if (debug_status & DBSR_IC) { /* instruction completion */
if (!user_mode(regs) && debugger_sstep(regs))
return;
current->thread.dbcr0 &= ~DBCR0_IC;
_exception(SIGTRAP, regs);
}
}
......
@@ -50,7 +50,11 @@
#else
#define MSR_ (MSR_ME|MSR_RI)
#endif
#ifdef CONFIG_4xx
#define MSR_KERNEL (MSR_|MSR_IR|MSR_DR|MSR_CE|MSR_DE)
#else
#define MSR_KERNEL (MSR_|MSR_IR|MSR_DR)
#endif
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
/* Floating Point Status and Control Register (FPSCR) Fields */
@@ -159,6 +163,7 @@
#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */
#define SPRN_DBSR 0x3F0 /* Debug Status Register */
#define DBSR_IC 0x80000000 /* Instruction Completion */
#define DBSR_BT 0x40000000 /* Branch taken */
#define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */
#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
#define DCCR_NOCACHE 0 /* Noncacheable */
@@ -642,6 +647,10 @@ struct thread_struct {
void *pgdir; /* root of page-table tree */
int fpexc_mode; /* floating-point exception mode */
signed long last_syscall;
#ifdef CONFIG_4xx
unsigned long dbcr0; /* debug control register values */
unsigned long dbcr1;
#endif
double fpr[32]; /* Complete floating point set */
unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */
unsigned long fpscr; /* Floating point status */
......