Commit 6c52a96e authored by David S. Miller

[SPARC64]: Revamp Spitfire error trap handling.

Current uncorrectable error handling was poor enough
that the processor could just loop taking the same
trap over and over again.  Fix things up so that we
at least get a log message and perhaps even some register
state.

In the process, much consolidation became possible,
particularly with the correctable error handler.

Prefix assembler and C function names with "spitfire"
to indicate that these are for Ultra-I/II/IIi/IIe only.

More work is needed to make these routines robust and
featureful to the level of the Ultra-III error handlers.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bde4e4ee
@@ -21,6 +21,7 @@
#include <asm/visasm.h>
#include <asm/estate.h>
#include <asm/auxio.h>
#include <asm/sfafsr.h>

#define curptr g6
@@ -690,9 +691,159 @@ netbsd_syscall:
retl
nop
-.globl __do_data_access_exception
-.globl __do_data_access_exception_tl1
-__do_data_access_exception_tl1:
/* We need to carefully read the error status, ACK
* the errors, prevent recursive traps, and pass the
* information on to C code for logging.
*
* We pass the AFAR in as-is, and we encode the status
* information as described in asm-sparc64/sfafsr.h
*/
.globl __spitfire_access_error
__spitfire_access_error:
/* Disable ESTATE error reporting so that we do not
* take recursive traps and RED state the processor.
*/
stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
mov UDBE_UE, %g1
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
/* __spitfire_cee_trap branches here with AFSR in %g4 and
* UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the
* ESTATE Error Enable register.
*/
__spitfire_cee_trap_continue:
ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
rdpr %tt, %g3
and %g3, 0x1ff, %g3 ! Paranoia
sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
or %g4, %g3, %g4
rdpr %tl, %g3
cmp %g3, 1
mov 1, %g3
bleu %xcc, 1f
sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
or %g4, %g3, %g4
/* Read in the UDB error register state, clearing the
* sticky error bits as-needed. We only clear them if
* the UE bit is set. Likewise, __spitfire_cee_trap
* below will only do so if the CE bit is set.
*
* NOTE: UltraSparc-I/II have high and low UDB error
* registers, corresponding to the two UDB units
* present on those chips. UltraSparc-IIi only
* has a single UDB, called "SDB" in the manual.
* For IIi the upper UDB register always reads
* as zero so for our purposes things will just
* work with the checks below.
*/
1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBH_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
stxa %g3, [%g0] ASI_UDB_ERROR_W
membar #Sync
1: mov 0x18, %g3
ldxa [%g3] ASI_UDBL_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBL_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
mov 0x18, %g7
stxa %g3, [%g7] ASI_UDB_ERROR_W
membar #Sync
1: /* Ok, now that we've latched the error state,
* clear the sticky bits in the AFSR.
*/
stxa %g4, [%g0] ASI_AFSR
membar #Sync
rdpr %tl, %g2
cmp %g2, 1
rdpr %pil, %g2
bleu,pt %xcc, 1f
wrpr %g0, 15, %pil
ba,pt %xcc, etraptl1
rd %pc, %g7
ba,pt %xcc, 2f
nop
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
2: mov %l4, %o1
mov %l5, %o2
call spitfire_access_error
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
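At this point the stub has latched and acknowledged the hardware state, and it hands off to C purely for logging. The traps.c side of this commit is not among the hunks shown here, so the following is only an illustrative sketch (the local variable names and message format are assumptions, not the commit's code) of a logger matching the calling convention set up above (%o0 = pt_regs, %o1 = the status word assembled in %g4, %o2 = the AFAR carried in %g5), unpacking the fields with the asm-sparc64/sfafsr.h macros introduced at the end of this patch:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/sfafsr.h>

/* Sketch only: the real handler in traps.c also has to decide whether
 * the fault is recoverable; here we just show how the encoded status
 * word splits back into its fields.
 */
void spitfire_access_error(struct pt_regs *regs,
                           unsigned long status_encoded,
                           unsigned long afar)
{
        unsigned long afsr = status_encoded & SFSTAT_AFSR_MASK;
        unsigned long tt   = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
        unsigned long udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
        unsigned long udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
        int tl1 = (status_encoded & SFSTAT_TL_GT_ONE) != 0;

        printk(KERN_WARNING "CPU[%d]: %s error at TPC[%016lx], tl>1=%d\n",
               smp_processor_id(),
               (tt == TRAP_TYPE_CEE) ? "Correctable ECC" : "Uncorrectable access",
               regs->tpc, tl1);
        printk(KERN_WARNING "CPU[%d]: AFSR[%09lx] AFAR[%016lx] UDB_H[%03lx] UDB_L[%03lx]\n",
               smp_processor_id(), afsr, afar, udbh, udbl);
}

Note that the UE-versus-CE distinction is available both from the AFSR bits and from the trap type field, which is why the CEE path below can simply branch to __spitfire_access_error when it sees UE set and let the C code tell the two cases apart.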
/* This is the trap handler entry point for ECC correctable
* errors. They are corrected, but we listen for the trap
* so that the event can be logged.
*
* Disrupting errors are either:
* 1) single-bit ECC errors during UDB reads to system
* memory
* 2) data parity errors during write-back events
*
* As far as I can make out from the manual, the CEE trap
* is only for correctable errors during memory read
* accesses by the front-end of the processor.
*
* The code below is only for trap level 1 CEE events,
* as it is the only situation where we can safely record
* and log. For trap level >1 we just clear the CE bit
* in the AFSR and return.
*
* This is just like __spitfire_access_error above, but it
* specifically handles correctable errors. If an
* uncorrectable error is indicated in the AFSR we
* will branch directly above to __spitfire_access_error
* to handle it instead. Uncorrectable therefore takes
* priority over correctable, and the error logging
* C code will notice this case by inspecting the
* trap type.
*/
.globl __spitfire_cee_trap
__spitfire_cee_trap:
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
mov 1, %g3
sllx %g3, SFAFSR_UE_SHIFT, %g3
andcc %g4, %g3, %g0 ! Check for UE
bne,pn %xcc, __spitfire_access_error
nop
/* Ok, in this case we only have a correctable error.
* Indicate in %g1 which UDB error state we wish to
* capture, and disable only CE error reporting, unlike
* the UE handling above which disables all error
* reporting. */
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
andn %g3, ESTATE_ERR_CE, %g3
stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
ba,pt %xcc, __spitfire_cee_trap_continue
mov UDBE_CE, %g1
.globl __spitfire_data_access_exception
.globl __spitfire_data_access_exception_tl1
__spitfire_data_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -714,12 +865,12 @@ __do_data_access_exception_tl1:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
-call data_access_exception_tl1
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-__do_data_access_exception:
__spitfire_data_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -733,14 +884,14 @@ __do_data_access_exception:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
-call data_access_exception
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-.globl __do_instruction_access_exception
-.globl __do_instruction_access_exception_tl1
-__do_instruction_access_exception_tl1:
.globl __spitfire_insn_access_exception
.globl __spitfire_insn_access_exception_tl1
__spitfire_insn_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -753,12 +904,12 @@ __do_instruction_access_exception_tl1:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
-call instruction_access_exception_tl1
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-__do_instruction_access_exception:
__spitfire_insn_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -771,102 +922,11 @@ __do_instruction_access_exception:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
-call instruction_access_exception
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-/* This is the trap handler entry point for ECC correctable
- * errors. They are corrected, but we listen for the trap
- * so that the event can be logged.
- *
- * Disrupting errors are either:
- * 1) single-bit ECC errors during UDB reads to system
- * memory
- * 2) data parity errors during write-back events
- *
- * As far as I can make out from the manual, the CEE trap
- * is only for correctable errors during memory read
- * accesses by the front-end of the processor.
- *
- * The code below is only for trap level 1 CEE events,
- * as it is the only situation where we can safely record
- * and log. For trap level >1 we just clear the CE bit
- * in the AFSR and return.
- */
-/* Our trap handling infrastructure allows us to preserve
- * two 64-bit values during etrap for arguments to
- * subsequent C code. Therefore we encode the information
- * as follows:
- *
- * value 1) Full 64-bits of AFAR
- * value 2) Low 33-bits of AFSR, then bits 33-->42
- * are UDBL error status and bits 43-->52
- * are UDBH error status
- */
-.align 64
-.globl cee_trap
-cee_trap:
-ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR
-ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR
-sllx %g1, 31, %g1 ! Clear reserved bits
-srlx %g1, 31, %g1 ! in AFSR
-/* NOTE: UltraSparc-I/II have high and low UDB error
- * registers, corresponding to the two UDB units
- * present on those chips. UltraSparc-IIi only
- * has a single UDB, called "SDB" in the manual.
- * For IIi the upper UDB register always reads
- * as zero so for our purposes things will just
- * work with the checks below.
- */
-ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status
-andcc %g3, (1 << 8), %g4 ! Check CE bit
-sllx %g3, (64 - 10), %g3 ! Clear reserved bits
-srlx %g3, (64 - 10), %g3 ! in UDB-Low error status
-sllx %g3, (33 + 0), %g3 ! Shift up to encoding area
-or %g1, %g3, %g1 ! Or it in
-be,pn %xcc, 1f ! Branch if CE bit was clear
-nop
-stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL
-membar #Sync ! Synchronize ASI stores
-1: mov 0x18, %g5 ! Addr of UDB-High error status
-ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it
-andcc %g3, (1 << 8), %g4 ! Check CE bit
-sllx %g3, (64 - 10), %g3 ! Clear reserved bits
-srlx %g3, (64 - 10), %g3 ! in UDB-High error status
-sllx %g3, (33 + 10), %g3 ! Shift up to encoding area
-or %g1, %g3, %g1 ! Or it in
-be,pn %xcc, 1f ! Branch if CE bit was clear
-nop
-nop
-stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH
-membar #Sync ! Synchronize ASI stores
-1: mov 1, %g5 ! AFSR CE bit is
-sllx %g5, 20, %g5 ! bit 20
-stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR
-membar #Sync ! Synchronize ASI stores
-sllx %g2, (64 - 41), %g2 ! Clear reserved bits
-srlx %g2, (64 - 41), %g2 ! in latched AFAR
-andn %g2, 0x0f, %g2 ! Finish resv bit clearing
-mov %g1, %g4 ! Move AFSR+UDB* into save reg
-mov %g2, %g5 ! Move AFAR into save reg
-rdpr %pil, %g2
-wrpr %g0, 15, %pil
-ba,pt %xcc, etrap_irq
-rd %pc, %g7
-mov %l4, %o0
-mov %l5, %o1
-call cee_log
-add %sp, PTREGS_OFF, %o2
-ba,a,pt %xcc, rtrap_irq
/* Capture I/D/E-cache state into per-cpu error scoreboard.
*
* %g1: (TL>=0) ? 1 : 0
...
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax: membar #Sync
-TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
tl0_resv009: BTRAP(0x9)
-tl0_iae: TRAP(do_iae)
tl0_iae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill: membar #Sync
TRAP_7INSNS(do_illegal_instruction)
@@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
tl0_div0: TRAP(do_div0)
tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f: BTRAP(0x2f)
-tl0_dax: TRAP_NOSAVE(__do_data_access_exception)
tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
tl0_resv031: BTRAP(0x31)
-tl0_dae: TRAP(do_dae)
tl0_dae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv033: BTRAP(0x33)
tl0_mna: TRAP_NOSAVE(do_mna)
tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
@@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec: TRAP_IVEC
tl0_paw: TRAP(do_paw)
tl0_vaw: TRAP(do_vaw)
-tl0_cee: TRAP_NOSAVE(cee_trap)
tl0_cee: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
tl0_iamiss:
#include "itlb_base.S"
tl0_damiss:
@@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
sparc64_ttable_tl1:
tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
-tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1)
tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
tl1_resv009: BTRAPTL1(0x9)
-tl1_iae: TRAPTL1(do_iae_tl1)
tl1_iae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill: TRAPTL1(do_ill_tl1)
tl1_privop: BTRAPTL1(0x11)
@@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
tl1_div0: TRAPTL1(do_div0_tl1)
tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
-tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1)
tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
tl1_resv031: BTRAPTL1(0x31)
-tl1_dae: TRAPTL1(do_dae_tl1)
tl1_dae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv033: BTRAPTL1(0x33)
tl1_mna: TRAP_NOSAVE(do_mna)
tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
@@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw: TRAPTL1(do_vaw_tl1)
/* The grotty trick to save %g1 into current->thread.cee_stuff
-* is because when we take this trap we could be interrupting trap
-* code already using the trap alternate global registers.
* is because when we take this trap we could be interrupting
* trap code already using the trap alternate global registers.
*
* We cross our fingers and pray that this store/load does
* not cause yet another CEE trap.
...
@@ -349,7 +349,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
-extern void data_access_exception(struct pt_regs *regs,
extern void spitfire_data_access_exception(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
@@ -394,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break;
}
default:
-data_access_exception(regs, 0, addr);
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
-data_access_exception(regs, 0, addr);
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
@@ -414,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
-data_access_exception(regs, 0, addr);
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
-data_access_exception(regs, 0, addr);
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
@@ -534,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
@@ -578,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
...
@@ -318,7 +318,7 @@ fill_fixup_dax:
nop
rdpr %pstate, %l1 ! Prepare to change globals.
mov %g4, %o1 ! Setup args for
-mov %g5, %o2 ! final call to data_access_exception.
mov %g5, %o2 ! final call to spitfire_data_access_exception.
andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
mov %g6, %o7 ! Stash away current.
@@ -330,7 +330,7 @@ fill_fixup_dax:
mov TSB_REG, %g1
ldxa [%g1] ASI_IMMU, %g5
#endif
-call data_access_exception
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
b,pt %xcc, rtrap
@@ -391,7 +391,7 @@ window_dax_from_user_common:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
-call data_access_exception
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
...
#ifndef _SPARC64_SFAFSR_H
#define _SPARC64_SFAFSR_H
#include <asm/const.h>
/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT)
#define SFAFSR_ME_SHIFT 32
#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT)
#define SFAFSR_PRIV_SHIFT 31
#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT)
#define SFAFSR_ISAP_SHIFT 30
#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT)
#define SFAFSR_ETP_SHIFT 29
#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT)
#define SFAFSR_IVUE_SHIFT 28
#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT)
#define SFAFSR_TO_SHIFT 27
#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT)
#define SFAFSR_BERR_SHIFT 26
#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT)
#define SFAFSR_LDP_SHIFT 25
#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT)
#define SFAFSR_CP_SHIFT 24
#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT)
#define SFAFSR_WP_SHIFT 23
#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT)
#define SFAFSR_EDP_SHIFT 22
#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT)
#define SFAFSR_UE_SHIFT 21
#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT)
#define SFAFSR_CE_SHIFT 20
#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT)
#define SFAFSR_ETS_SHIFT 16
#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT)
#define SFAFSR_PSYND_SHIFT 0
/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read
* ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write
*/
#define UDBE_UE (_AC(1,UL) << 9)
#define UDBE_CE (_AC(1,UL) << 8)
#define UDBE_E_SYNDR (_AC(0xff,UL) << 0)
/* The trap handlers for asynchronous errors encode the AFSR and
* other pieces of information into a single 64-bit status argument
* for C code, laid out as follows:
*
* -----------------------------------------------
* | UDB_H | UDB_L | TL>1 | TT | AFSR |
* -----------------------------------------------
* 63 54 53 44 42 41 33 32 0
*
* The AFAR is passed in unchanged.
*/
#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
#define SFSTAT_UDBH_SHIFT 54
#define SFSTAT_UDBL_MASK (_AC(0x3ff,UL) << SFSTAT_UDBL_SHIFT)
#define SFSTAT_UDBL_SHIFT 44
#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT)
#define SFSTAT_TL_GT_ONE_SHIFT 42
#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT)
#define SFSTAT_TRAP_TYPE_SHIFT 33
#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT)
#define SFSTAT_AFSR_SHIFT 0
/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */
#define ESTATE_ERR_CE 0x1 /* Correctable errors */
#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */
#define ESTATE_ERR_ISAP 0x4 /* System address parity error */
#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \
ESTATE_ERR_NCE | \
ESTATE_ERR_ISAP)
/* The various trap types that report using the above state. */
#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */
#define TRAP_TYPE_DAE 0x32 /* Data Access Error */
#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */
#endif /* _SPARC64_SFAFSR_H */
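To make the packed layout above concrete, here is a small self-contained userspace demo. It is not part of the commit; the DEMO_* constants simply mirror the shift values defined in the header. It builds a status word the way the trap stubs do and then unpacks it again:

#include <stdio.h>
#include <stdint.h>

/* Field positions mirrored from asm-sparc64/sfafsr.h above (the _AC()
 * wrapper is kernel-specific, so plain constants are used here).
 */
#define DEMO_UDBH_SHIFT      54
#define DEMO_UDBL_SHIFT      44
#define DEMO_TL_GT_ONE_SHIFT 42
#define DEMO_TT_SHIFT        33
#define DEMO_TT_DAE          0x32   /* Data Access Error trap type */

int main(void)
{
	/* Pack a status word the way __spitfire_access_error does:
	 * start from a sample AFSR with UE (bit 21) and PRIV (bit 31)
	 * set, then OR in the trap type, the TL>1 flag, and the two
	 * 10-bit UDB error values (UDBE_UE is bit 9 of each UDB).
	 */
	uint64_t afsr = (1ULL << 21) | (1ULL << 31);
	uint64_t status = afsr
		| ((uint64_t)DEMO_TT_DAE << DEMO_TT_SHIFT)
		| (0ULL << DEMO_TL_GT_ONE_SHIFT)   /* taken at TL == 1 */
		| (0x200ULL << DEMO_UDBH_SHIFT)    /* UDB_H: UE bit set */
		| (0x200ULL << DEMO_UDBL_SHIFT);   /* UDB_L: UE bit set */

	/* Unpack it again, as the C logging code would. */
	printf("AFSR  = 0x%09llx\n", (unsigned long long)(status & ((1ULL << 33) - 1)));
	printf("TT    = 0x%02llx\n", (unsigned long long)((status >> DEMO_TT_SHIFT) & 0x1ff));
	printf("TL>1  = %llu\n",     (unsigned long long)((status >> DEMO_TL_GT_ONE_SHIFT) & 1));
	printf("UDB_H = 0x%03llx\n", (unsigned long long)((status >> DEMO_UDBH_SHIFT) & 0x3ff));
	printf("UDB_L = 0x%03llx\n", (unsigned long long)((status >> DEMO_UDBL_SHIFT) & 0x3ff));
	return 0;
}

Compiling and running this prints the five fields back out, which is a quick way to sanity-check the field boundaries drawn in the diagram above.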