Commit 82c7633a authored by Kumar Gala, committed by Linus Torvalds

[PATCH] ppc32: Remove SPR short-hand defines

Removed the Special purpose register (SPR) short-hand defines to help with
name space pollution.  All SPRs are now referenced as SPRN_<foo>.
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 70fc7b77
......@@ -657,7 +657,7 @@ static int __init scc_enet_init(void)
cp = cpmp; /* Get pointer to Communication Processor */
immap = (immap_t *)(mfspr(IMMR) & 0xFFFF0000); /* and to internal registers */
immap = (immap_t *)(mfspr(SPRN_IMMR) & 0xFFFF0000); /* and to internal registers */
bd = (bd_t *)__res;
......
......@@ -1735,7 +1735,7 @@ static int __init fec_enet_init(void)
/* Bits moved from Rev. D onward.
*/
if ((mfspr(IMMR) & 0xffff) < 0x0501)
if ((mfspr(SPRN_IMMR) & 0xffff) < 0x0501)
immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */
else
immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */
......
......@@ -47,23 +47,23 @@ disable_6xx_mmu:
/* Clear BATs */
li r8,0
mtspr DBAT0U,r8
mtspr DBAT0L,r8
mtspr DBAT1U,r8
mtspr DBAT1L,r8
mtspr DBAT2U,r8
mtspr DBAT2L,r8
mtspr DBAT3U,r8
mtspr DBAT3L,r8
mtspr SPRN_DBAT0U,r8
mtspr SPRN_DBAT0L,r8
mtspr SPRN_DBAT1U,r8
mtspr SPRN_DBAT1L,r8
mtspr SPRN_DBAT2U,r8
mtspr SPRN_DBAT2L,r8
mtspr SPRN_DBAT3U,r8
mtspr SPRN_DBAT3L,r8
.clearbats_601:
mtspr IBAT0U,r8
mtspr IBAT0L,r8
mtspr IBAT1U,r8
mtspr IBAT1L,r8
mtspr IBAT2U,r8
mtspr IBAT2L,r8
mtspr IBAT3U,r8
mtspr IBAT3L,r8
mtspr SPRN_IBAT0U,r8
mtspr SPRN_IBAT0L,r8
mtspr SPRN_IBAT1U,r8
mtspr SPRN_IBAT1L,r8
mtspr SPRN_IBAT2U,r8
mtspr SPRN_IBAT2L,r8
mtspr SPRN_IBAT3U,r8
mtspr SPRN_IBAT3L,r8
isync
sync
sync
......@@ -84,14 +84,14 @@ disable_6xx_l1cache:
/* Enable, invalidate and then disable the L1 icache/dcache. */
li r8,0
ori r8,r8,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
mfspr r11,HID0
mfspr r11,SPRN_HID0
or r11,r11,r8
andc r10,r11,r8
isync
mtspr HID0,r8
mtspr SPRN_HID0,r8
sync
isync
mtspr HID0,r10
mtspr SPRN_HID0,r10
sync
isync
blr
......@@ -107,17 +107,17 @@ _setup_L2CR:
/* Invalidate/disable L2 cache */
sync
isync
mfspr r8,L2CR
mfspr r8,SPRN_L2CR
rlwinm r8,r8,0,1,31
oris r8,r8,L2CR_L2I@h
sync
isync
mtspr L2CR,r8
mtspr SPRN_L2CR,r8
sync
isync
/* Wait for the invalidation to complete */
mfspr r8,PVR
mfspr r8,SPRN_PVR
srwi r8,r8,16
cmplwi cr0,r8,0x8000 /* 7450 */
cmplwi cr1,r8,0x8001 /* 7455 */
......@@ -126,19 +126,19 @@ _setup_L2CR:
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bne 2f
1: mfspr r8,L2CR /* On 745x, poll L2I bit (bit 10) */
1: mfspr r8,SPRN_L2CR /* On 745x, poll L2I bit (bit 10) */
rlwinm. r9,r8,0,10,10
bne 1b
b 3f
2: mfspr r8,L2CR /* On 75x & 74[01]0, poll L2IP bit (bit 31) */
2: mfspr r8,SPRN_L2CR /* On 75x & 74[01]0, poll L2IP bit (bit 31) */
rlwinm. r9,r8,0,31,31
bne 2b
3: rlwinm r8,r8,0,11,9 /* Turn off L2I bit */
sync
isync
mtspr L2CR,r8
mtspr SPRN_L2CR,r8
sync
isync
blr
......@@ -148,24 +148,24 @@ _setup_L3CR:
/* Invalidate/disable L3 cache */
sync
isync
mfspr r8,L3CR
mfspr r8,SPRN_L3CR
rlwinm r8,r8,0,1,31
ori r8,r8,L3CR_L3I@l
sync
isync
mtspr L3CR,r8
mtspr SPRN_L3CR,r8
sync
isync
/* Wait for the invalidation to complete */
1: mfspr r8,L3CR
1: mfspr r8,SPRN_L3CR
rlwinm. r9,r8,0,21,21
bne 1b
rlwinm r8,r8,0,22,20 /* Turn off L3I bit */
sync
isync
mtspr L3CR,r8
mtspr SPRN_L3CR,r8
sync
isync
blr
......@@ -190,7 +190,7 @@ timebase_period_ns:
*/
.globl udelay
udelay:
mfspr r4,PVR
mfspr r4,SPRN_PVR
srwi r4,r4,16
cmpwi 0,r4,1 /* 601 ? */
bne .udelay_not_601
......@@ -240,11 +240,11 @@ _GLOBAL(flush_instruction_cache)
#ifdef CONFIG_8xx
lis r3, IDC_INVALL@h
mtspr IC_CST, r3
mtspr SPRN_IC_CST, r3
lis r3, IDC_ENABLE@h
mtspr IC_CST, r3
mtspr SPRN_IC_CST, r3
lis r3, IDC_DISABLE@h
mtspr DC_CST, r3
mtspr SPRN_DC_CST, r3
#elif CONFIG_4xx
lis r3,start@h # r9 = &_start
lis r4,_etext@ha
......@@ -258,14 +258,14 @@ _GLOBAL(flush_instruction_cache)
/* Enable, invalidate and then disable the L1 icache/dcache. */
li r3,0
ori r3,r3,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
mfspr r4,HID0
mfspr r4,SPRN_HID0
or r5,r4,r3
isync
mtspr HID0,r5
mtspr SPRN_HID0,r5
sync
isync
ori r5,r4,HID0_ICE /* Enable cache */
mtspr HID0,r5
mtspr SPRN_HID0,r5
sync
isync
#endif
......
......@@ -7,7 +7,7 @@
bl _setup_L2CR; \
\
/* If 745x, turn off L3CR as well */ \
mfspr r8,PVR; \
mfspr r8,SPRN_PVR; \
srwi r8,r8,16; \
\
cmpli cr0,r8,0x8000; /* 7450 */ \
......
......@@ -506,7 +506,7 @@ embed_config(bd_t **bdp)
memcpy(bd->bi_enetaddr, cp, 6);
/* can busfreq be calculated? */
pvr = mfspr(PVR);
pvr = mfspr(SPRN_PVR);
if ((pvr & 0xffff0000) == 0x80820000) {
bd->bi_busfreq = 100000000;
clk_8280(bd);
......
......@@ -52,7 +52,7 @@ start_:
*/
li r4,0x0000
isync
mtspr HID0,r4
mtspr SPRN_HID0,r4
sync
isync
#endif
......
......@@ -21,13 +21,13 @@
.globl mv64x60_board_init
mv64x60_board_init:
/* DINK doesn't enable 745x timebase, so enable here (Adrian Cox) */
mfspr r25,PVR
mfspr r25,SPRN_PVR
srwi r25,r25,16
cmplwi r25,(PVR_7450 >> 16)
bne 1f
mfspr r25,HID0
mfspr r25,SPRN_HID0
oris r25,r25,(HID0_TBEN >> 16)
mtspr HID0,r25
mtspr SPRN_HID0,r25
1:
#if (CONFIG_MV64X60_NEW_BASE != CONFIG_MV64X60_BASE)
li r23,20
......
......@@ -73,16 +73,16 @@ _GLOBAL(__setup_cpu_745x)
/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
mfspr r11,HID0
mfspr r11,SPRN_HID0
andi. r0,r11,HID0_DCE
ori r11,r11,HID0_ICE|HID0_DCE
ori r8,r11,HID0_ICFI
bne 1f /* don't invalidate the D-cache */
ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1: sync
mtspr HID0,r8 /* enable and invalidate caches */
mtspr SPRN_HID0,r8 /* enable and invalidate caches */
sync
mtspr HID0,r11 /* enable caches */
mtspr SPRN_HID0,r11 /* enable caches */
sync
isync
blr
......@@ -91,13 +91,13 @@ setup_common_caches:
* Enable superscalar execution & branch history table
*/
setup_604_hid0:
mfspr r11,HID0
mfspr r11,SPRN_HID0
ori r11,r11,HID0_SIED|HID0_BHTE
ori r8,r11,HID0_BTCD
sync
mtspr HID0,r8 /* flush branch target address cache */
mtspr SPRN_HID0,r8 /* flush branch target address cache */
sync /* on 604e/604r */
mtspr HID0,r11
mtspr SPRN_HID0,r11
sync
isync
blr
......@@ -150,7 +150,7 @@ setup_7410_workarounds:
* Clear Instruction cache throttling (ICTC)
*/
setup_750_7400_hid0:
mfspr r11,HID0
mfspr r11,SPRN_HID0
ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
......@@ -158,9 +158,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
li r3,HID0_SPD
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
mtspr SPRN_HID0,r11
sync
isync
blr
......@@ -214,7 +214,7 @@ setup_745x_specifics:
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
mfspr r11,HID0
mfspr r11,SPRN_HID0
/* All of the bits we have to set.....
*/
......@@ -232,9 +232,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
mtspr ICTC,r3 /* Instruction Cache Throttling off */
mtspr SPRN_ICTC,r3 /* Instruction Cache Throttling off */
isync
mtspr HID0,r11
mtspr SPRN_HID0,r11
sync
isync
......@@ -285,7 +285,7 @@ _GLOBAL(__save_cpu_setup)
stw r3,CS_HID0(r5)
/* Now deal with CPU type dependent registers */
mfspr r3,PVR
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmplwi cr0,r3,0x8000 /* 7450 */
cmplwi cr1,r3,0x000c /* 7400 */
......@@ -323,7 +323,7 @@ _GLOBAL(__save_cpu_setup)
mfspr r4,SPRN_HID1
stw r4,CS_HID1(r5)
/* If rev 2.x, backup HID2 */
mfspr r3,PVR
mfspr r3,SPRN_PVR
andi. r3,r3,0xff00
cmpwi cr0,r3,0x0200
bne 1f
......@@ -354,7 +354,7 @@ _GLOBAL(__restore_cpu_setup)
isync
/* Now deal with CPU type dependent registers */
mfspr r3,PVR
mfspr r3,SPRN_PVR
srwi r3,r3,16
cmplwi cr0,r3,0x8000 /* 7450 */
cmplwi cr1,r3,0x000c /* 7400 */
......@@ -414,7 +414,7 @@ _GLOBAL(__restore_cpu_setup)
* to PLL 0 on all
*/
/* If rev 2.x, restore HID2 with low voltage bit cleared */
mfspr r3,PVR
mfspr r3,SPRN_PVR
andi. r3,r3,0xff00
cmpwi cr0,r3,0x0200
bne 4f
......
......@@ -47,8 +47,8 @@
#ifdef CONFIG_BOOKE
#define COR r8 /* Critical Offset Register (COR) */
#define BOOKE_LOAD_COR lis COR,crit_save@ha
#define BOOKE_REST_COR mfspr COR,SPRG2
#define BOOKE_SAVE_COR mtspr SPRG2,COR
#define BOOKE_REST_COR mfspr COR,SPRN_SPRG2
#define BOOKE_SAVE_COR mtspr SPRN_SPRG2,COR
#else
#define COR 0
#define BOOKE_LOAD_COR
......@@ -59,13 +59,13 @@
#ifdef CONFIG_BOOKE
.globl mcheck_transfer_to_handler
mcheck_transfer_to_handler:
mtspr SPRG6W,r8
mtspr SPRN_SPRG6W,r8
lis r8,mcheck_save@ha
lwz r0,mcheck_r10@l(r8)
stw r0,GPR10(r11)
lwz r0,mcheck_r11@l(r8)
stw r0,GPR11(r11)
mfspr r8,SPRG6R
mfspr r8,SPRN_SPRG6R
b transfer_to_handler_full
#endif
......@@ -101,10 +101,10 @@ transfer_to_handler:
stw r9,_MSR(r11)
andi. r2,r9,MSR_PR
mfctr r12
mfspr r2,XER
mfspr r2,SPRN_XER
stw r12,_CTR(r11)
stw r2,_XER(r11)
mfspr r12,SPRG3
mfspr r12,SPRN_SPRG3
addi r2,r12,-THREAD
tovirt(r2,r2) /* set r2 to current */
beq 2f /* if from user, fix up THREAD.regs */
......@@ -152,8 +152,8 @@ transfer_to_handler_cont:
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
FIX_SRR1(r10,r12)
mtspr SRR0,r11
mtspr SRR1,r10
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
......@@ -177,8 +177,8 @@ stack_ovf:
addi r9,r9,StackOverflow@l
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
FIX_SRR1(r10,r12)
mtspr SRR0,r9
mtspr SRR1,r10
mtspr SPRN_SRR0,r9
mtspr SPRN_SRR1,r10
SYNC
RFI
......@@ -260,8 +260,8 @@ syscall_exit_cont:
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
mtspr SRR0,r7
mtspr SRR1,r8
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
SYNC
RFI
......@@ -538,7 +538,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
tophys(r0,r4)
CLR_TOP32(r0)
mtspr SPRG3,r0 /* Update current THREAD phys addr */
mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
lwz r1,KSP(r4) /* Load new stack pointer */
/* save the old current 'last' for return value */
......@@ -642,7 +642,7 @@ restore:
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr XER,r10
mtspr SPRN_XER,r10
mtctr r11
PPC405_ERR77(0,r1)
......@@ -675,8 +675,8 @@ exc_exit_restart:
lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SRR0,r12
mtspr SRR1,r9
mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r9
REST_4GPRS(9, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
......@@ -702,8 +702,8 @@ exc_exit_restart:
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
exc_exit_start:
mtspr SRR0,r11
mtspr SRR1,r12
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
REST_2GPRS(11, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
......@@ -742,7 +742,7 @@ ret_from_crit_exc:
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr XER,r10
mtspr SPRN_XER,r10
mtctr r11
PPC405_ERR77(0,r1)
......@@ -766,8 +766,8 @@ ret_from_crit_exc:
mtspr SPRN_ESR,r10
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
mtspr CSRR0,r11
mtspr CSRR1,r12
mtspr SPRN_CSRR0,r11
mtspr SPRN_CSRR1,r12
lwz r9,GPR9(r1)
lwz r12,GPR12(r1)
BOOKE_SAVE_COR
......@@ -787,9 +787,9 @@ ret_from_crit_exc:
lwz r10,crit_sprg7@l(COR)
mtspr SPRN_SPRG7,r10
lwz r10,crit_srr0@l(COR)
mtspr SRR0,r10
mtspr SPRN_SRR0,r10
lwz r10,crit_srr1@l(COR)
mtspr SRR1,r10
mtspr SPRN_SRR1,r10
lwz r10,crit_pid@l(COR)
mtspr SPRN_PID,r10
lwz r10,GPR10(r1)
......@@ -820,7 +820,7 @@ ret_from_mcheck_exc:
lwz r10,_XER(r1)
lwz r11,_CTR(r1)
mtspr XER,r10
mtspr SPRN_XER,r10
mtctr r11
stwcx. r0,0,r1 /* to clear the reservation */
......@@ -835,11 +835,11 @@ ret_from_mcheck_exc:
mtspr SPRN_ESR,r10
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
mtspr MCSRR0,r11
mtspr MCSRR1,r12
mtspr SPRN_MCSRR0,r11
mtspr SPRN_MCSRR1,r12
lwz r9,GPR9(r1)
lwz r12,GPR12(r1)
mtspr SPRG6W,r8
mtspr SPRN_SPRG6W,r8
lis r8,mcheck_save@ha
lwz r10,mcheck_sprg0@l(r8)
mtspr SPRN_SPRG0,r10
......@@ -852,19 +852,19 @@ ret_from_mcheck_exc:
lwz r10,mcheck_sprg7@l(r8)
mtspr SPRN_SPRG7,r10
lwz r10,mcheck_srr0@l(r8)
mtspr SRR0,r10
mtspr SPRN_SRR0,r10
lwz r10,mcheck_srr1@l(r8)
mtspr SRR1,r10
mtspr SPRN_SRR1,r10
lwz r10,mcheck_csrr0@l(r8)
mtspr CSRR0,r10
mtspr SPRN_CSRR0,r10
lwz r10,mcheck_csrr1@l(r8)
mtspr CSRR1,r10
mtspr SPRN_CSRR1,r10
lwz r10,mcheck_pid@l(r8)
mtspr SPRN_PID,r10
lwz r10,GPR10(r1)
lwz r11,GPR11(r1)
lwz r1,GPR1(r1)
mfspr r8,SPRG6R
mfspr r8,SPRN_SPRG6R
RFMCI
#endif /* CONFIG_BOOKE */
......@@ -997,9 +997,9 @@ _GLOBAL(enter_rtas)
li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
mtlr r6
CLR_TOP32(r7)
mtspr SPRG2,r7
mtspr SRR0,r8
mtspr SRR1,r9
mtspr SPRN_SPRG2,r7
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI
1: tophys(r9,r1)
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
......@@ -1007,9 +1007,9 @@ _GLOBAL(enter_rtas)
FIX_SRR1(r9,r0)
addi r1,r1,INT_FRAME_SIZE
li r0,0
mtspr SPRG2,r0
mtspr SRR0,r8
mtspr SRR1,r9
mtspr SPRN_SPRG2,r0
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI /* return to caller */
.globl machine_check_in_rtas
......
This diff is collapsed.
......@@ -162,10 +162,10 @@ skpinv: addi r4,r4,1 /* Increment */
/* Force context change */
mfmsr r0
mtspr SRR1, r0
mtspr SPRN_SRR1, r0
lis r0,3f@h
ori r0,r0,3f@l
mtspr SRR0,r0
mtspr SPRN_SRR0,r0
sync
rfi
......@@ -238,7 +238,7 @@ skpinv: addi r4,r4,1 /* Increment */
/* ptr to current thread */
addi r4,r2,THREAD /* init task's THREAD */
mtspr SPRG3,r4
mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@h
......@@ -274,8 +274,8 @@ skpinv: addi r4,r4,1 /* Increment */
ori r4,r4,start_kernel@l
lis r3,MSR_KERNEL@h
ori r3,r3,MSR_KERNEL@l
mtspr SRR0,r4
mtspr SRR1,r3
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
rfi /* change context and jump to start_kernel */
/*
......@@ -308,12 +308,12 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mtspr SPRN_SPRG7W, r11
/*
* Check if it was a store fault, if not then bail
......@@ -342,7 +342,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
......@@ -390,13 +390,13 @@ interrupt_base:
/* Done...restore registers and get out of here.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
rfi /* Force context change */
2:
......@@ -404,13 +404,13 @@ interrupt_base:
* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b data_access
/* Instruction Storage Interrupt */
......@@ -449,12 +449,12 @@ interrupt_base:
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
......@@ -472,7 +472,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
......@@ -503,12 +503,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b data_access
/* Instruction TLB Error Interrupt */
......@@ -518,13 +518,13 @@ interrupt_base:
* to a different point.
*/
START_EXCEPTION(InstructionTLBError)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mfspr r10, SRR0 /* Get faulting address */
mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
......@@ -541,7 +541,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
/* Load PID into MMUCR TID */
......@@ -572,12 +572,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b InstructionStorage
/* Debug Interrupt */
......@@ -661,12 +661,12 @@ finish_tlb_load:
/* Done...restore registers and get out of here.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
rfi /* Force context change */
/*
......
......@@ -77,10 +77,10 @@ _GLOBAL(_start)
turn_on_mmu:
lis r0,MSR_KERNEL@h
ori r0,r0,MSR_KERNEL@l
mtspr SRR1,r0
mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
mtspr SRR0,r0
mtspr SPRN_SRR0,r0
SYNC
rfi /* enables MMU */
b . /* prevent prefetch past rfi */
......@@ -130,7 +130,7 @@ _GLOBAL(crit_srr1)
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
beq 1f; \
mfspr r1,SPRG3; /* if from user, start at top of */\
mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
addi r1,r1,THREAD_SIZE; \
1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
......@@ -138,16 +138,16 @@ _GLOBAL(crit_srr1)
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mfspr r10,SPRG0; \
mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
mfspr r12,SPRG1; \
mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r10,SPRG2; \
mfspr r12,SRR0; \
mfspr r10,SPRN_SPRG2; \
mfspr r12,SPRN_SRR0; \
stw r10,GPR1(r11); \
mfspr r9,SRR1; \
mfspr r9,SPRN_SRR1; \
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
......@@ -165,23 +165,23 @@ _GLOBAL(crit_srr1)
#define CRITICAL_EXCEPTION_PROLOG \
stw r10,crit_r10@l(0); /* save two registers to work with */\
stw r11,crit_r11@l(0); \
mfspr r10,SPRG0; \
mfspr r10,SPRN_SPRG0; \
stw r10,crit_sprg0@l(0); \
mfspr r10,SPRG1; \
mfspr r10,SPRN_SPRG1; \
stw r10,crit_sprg1@l(0); \
mfspr r10,SPRG4; \
mfspr r10,SPRN_SPRG4; \
stw r10,crit_sprg4@l(0); \
mfspr r10,SPRG5; \
mfspr r10,SPRN_SPRG5; \
stw r10,crit_sprg5@l(0); \
mfspr r10,SPRG6; \
mfspr r10,SPRN_SPRG6; \
stw r10,crit_sprg6@l(0); \
mfspr r10,SPRG7; \
mfspr r10,SPRN_SPRG7; \
stw r10,crit_sprg7@l(0); \
mfspr r10,SPRN_PID; \
stw r10,crit_pid@l(0); \
mfspr r10,SRR0; \
mfspr r10,SPRN_SRR0; \
stw r10,crit_srr0@l(0); \
mfspr r10,SRR1; \
mfspr r10,SPRN_SRR1; \
stw r10,crit_srr1@l(0); \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
......@@ -190,7 +190,7 @@ _GLOBAL(crit_srr1)
ori r11,r11,critical_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
mfspr r11,SPRG3; /* if from user, start at top of */\
mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
......@@ -204,9 +204,9 @@ _GLOBAL(crit_srr1)
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
mfspr r12,SRR2; \
mfspr r12,SPRN_SRR2; \
stw r1,GPR1(r11); \
mfspr r9,SRR3; \
mfspr r9,SPRN_SRR3; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
......@@ -299,8 +299,8 @@ label:
* and exit. Otherwise, we call heavywight functions to do the work.
*/
START_EXCEPTION(0x0300, DataStorage)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
stw r12, 0(r0)
stw r9, 4(r0)
......@@ -309,12 +309,12 @@ label:
stw r11, 8(r0)
stw r12, 12(r0)
#else
mtspr SPRG4, r12
mtspr SPRG5, r9
mtspr SPRN_SPRG4, r12
mtspr SPRN_SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
mtspr SPRG7, r11
mtspr SPRG6, r12
mtspr SPRN_SPRG7, r11
mtspr SPRN_SPRG6, r12
#endif
/* First, check if it was a zone fault (which means a user
......@@ -341,7 +341,7 @@ label:
/* Get the PGD for the current thread.
*/
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r11, r11)
......@@ -388,15 +388,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r12, SPRG6
mfspr r11, SPRG7
mfspr r12, SPRN_SPRG6
mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
mfspr r9, SPRN_SPRG5
mfspr r12, SPRN_SPRG4
#endif
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
......@@ -413,15 +413,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r12, SPRG6
mfspr r11, SPRG7
mfspr r12, SPRN_SPRG6
mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
mfspr r9, SPRN_SPRG5
mfspr r12, SPRN_SPRG4
#endif
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b DataAccess
/*
......@@ -496,8 +496,8 @@ label:
* load TLB entries from the page table if they exist.
*/
START_EXCEPTION(0x1100, DTLBMiss)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
stw r12, 0(r0)
stw r9, 4(r0)
......@@ -506,12 +506,12 @@ label:
stw r11, 8(r0)
stw r12, 12(r0)
#else
mtspr SPRG4, r12
mtspr SPRG5, r9
mtspr SPRN_SPRG4, r12
mtspr SPRN_SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
mtspr SPRG7, r11
mtspr SPRG6, r12
mtspr SPRN_SPRG7, r11
mtspr SPRN_SPRG6, r12
#endif
mfspr r10, SPRN_DEAR /* Get faulting address */
......@@ -529,7 +529,7 @@ label:
/* Get the PGD for the current thread.
*/
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r11, r11)
......@@ -579,15 +579,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r12, SPRG6
mfspr r11, SPRG7
mfspr r12, SPRN_SPRG6
mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
mfspr r9, SPRN_SPRG5
mfspr r12, SPRN_SPRG4
#endif
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b DataAccess
/* 0x1200 - Instruction TLB Miss Exception
......@@ -595,8 +595,8 @@ label:
* registers and bailout to a different point.
*/
START_EXCEPTION(0x1200, ITLBMiss)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
stw r12, 0(r0)
stw r9, 4(r0)
......@@ -605,14 +605,14 @@ label:
stw r11, 8(r0)
stw r12, 12(r0)
#else
mtspr SPRG4, r12
mtspr SPRG5, r9
mtspr SPRN_SPRG4, r12
mtspr SPRN_SPRG5, r9
mfcr r11
mfspr r12, SPRN_PID
mtspr SPRG7, r11
mtspr SPRG6, r12
mtspr SPRN_SPRG7, r11
mtspr SPRN_SPRG6, r12
#endif
mfspr r10, SRR0 /* Get faulting address */
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
......@@ -628,7 +628,7 @@ label:
/* Get the PGD for the current thread.
*/
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
tophys(r11, r11)
......@@ -678,15 +678,15 @@ label:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r12, SPRG6
mfspr r11, SPRG7
mfspr r12, SPRN_SPRG6
mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
mfspr r9, SPRN_SPRG5
mfspr r12, SPRN_SPRG4
#endif
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b InstructionAccess
EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE)
......@@ -754,8 +754,8 @@ label:
lwz r0,GPR0(r11)
lwz r1,GPR1(r11)
mtcrf 0x80,r10
mtspr SRR2,r12
mtspr SRR3,r9
mtspr SPRN_SRR2,r12
mtspr SPRN_SRR3,r9
lwz r9,GPR9(r11)
lwz r12,GPR12(r11)
lwz r10,crit_r10@l(0)
......@@ -831,15 +831,15 @@ finish_tlb_load:
lwz r9, 4(r0)
lwz r12, 0(r0)
#else
mfspr r12, SPRG6
mfspr r11, SPRG7
mfspr r12, SPRN_SPRG6
mfspr r11, SPRN_SPRG7
mtspr SPRN_PID, r12
mtcr r11
mfspr r9, SPRG5
mfspr r12, SPRG4
mfspr r9, SPRN_SPRG5
mfspr r12, SPRN_SPRG4
#endif
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
......@@ -863,7 +863,7 @@ start_here:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRG3,r4
mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@ha
......@@ -894,8 +894,8 @@ start_here:
tophys(r4,r4)
lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
mtspr SRR0,r4
mtspr SRR1,r3
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
rfi
b . /* prevent prefetch past rfi */
......@@ -920,8 +920,8 @@ start_here:
ori r4,r4,MSR_KERNEL@l
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
mtspr SRR0,r3
mtspr SRR1,r4
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
rfi /* enable MMU and jump to start_kernel */
b . /* prevent prefetch past rfi */
......
This diff is collapsed.
......@@ -18,7 +18,7 @@
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
beq 1f; \
mfspr r1,SPRG3; /* if from user, start at top of */\
mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
addi r1,r1,THREAD_SIZE; \
1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
......@@ -26,16 +26,16 @@
stw r10,_CCR(r11); /* save various registers */\
stw r12,GPR12(r11); \
stw r9,GPR9(r11); \
mfspr r10,SPRG0; \
mfspr r10,SPRN_SPRG0; \
stw r10,GPR10(r11); \
mfspr r12,SPRG1; \
mfspr r12,SPRN_SPRG1; \
stw r12,GPR11(r11); \
mflr r10; \
stw r10,_LINK(r11); \
mfspr r10,SPRG4R; \
mfspr r12,SRR0; \
mfspr r10,SPRN_SPRG4R; \
mfspr r12,SPRN_SRR0; \
stw r10,GPR1(r11); \
mfspr r9,SRR1; \
mfspr r9,SPRN_SRR1; \
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
......@@ -55,27 +55,27 @@
* since the MMU is always on and the save area is offset from KERNELBASE.
*/
#define CRITICAL_EXCEPTION_PROLOG \
mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \
mtspr SPRN_SPRG2,r8; /* SPRG2 only used in criticals */ \
lis r8,crit_save@ha; \
stw r10,crit_r10@l(r8); \
stw r11,crit_r11@l(r8); \
mfspr r10,SPRG0; \
mfspr r10,SPRN_SPRG0; \
stw r10,crit_sprg0@l(r8); \
mfspr r10,SPRG1; \
mfspr r10,SPRN_SPRG1; \
stw r10,crit_sprg1@l(r8); \
mfspr r10,SPRG4R; \
mfspr r10,SPRN_SPRG4R; \
stw r10,crit_sprg4@l(r8); \
mfspr r10,SPRG5R; \
mfspr r10,SPRN_SPRG5R; \
stw r10,crit_sprg5@l(r8); \
mfspr r10,SPRG7R; \
mfspr r10,SPRN_SPRG7R; \
stw r10,crit_sprg7@l(r8); \
mfspr r10,SPRN_PID; \
stw r10,crit_pid@l(r8); \
mfspr r10,SRR0; \
mfspr r10,SPRN_SRR0; \
stw r10,crit_srr0@l(r8); \
mfspr r10,SRR1; \
mfspr r10,SPRN_SRR1; \
stw r10,crit_srr1@l(r8); \
mfspr r8,SPRG2; /* SPRG2 only used in criticals */ \
mfspr r8,SPRN_SPRG2; /* SPRG2 only used in criticals */ \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_CSRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
......@@ -83,7 +83,7 @@
ori r11,r11,critical_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
mfspr r11,SPRG3; /* if from user, start at top of */\
mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
......@@ -96,9 +96,9 @@
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
mfspr r12,CSRR0; \
mfspr r12,SPRN_CSRR0; \
stw r1,GPR1(r11); \
mfspr r9,CSRR1; \
mfspr r9,SPRN_CSRR1; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
......@@ -116,31 +116,31 @@
* is always on and the save area is offset from KERNELBASE.
*/
#define MCHECK_EXCEPTION_PROLOG \
mtspr SPRG6W,r8; /* SPRG6 used in machine checks */ \
mtspr SPRN_SPRG6W,r8; /* SPRG6 used in machine checks */ \
lis r8,mcheck_save@ha; \
stw r10,mcheck_r10@l(r8); \
stw r11,mcheck_r11@l(r8); \
mfspr r10,SPRG0; \
mfspr r10,SPRN_SPRG0; \
stw r10,mcheck_sprg0@l(r8); \
mfspr r10,SPRG1; \
mfspr r10,SPRN_SPRG1; \
stw r10,mcheck_sprg1@l(r8); \
mfspr r10,SPRG4R; \
mfspr r10,SPRN_SPRG4R; \
stw r10,mcheck_sprg4@l(r8); \
mfspr r10,SPRG5R; \
mfspr r10,SPRN_SPRG5R; \
stw r10,mcheck_sprg5@l(r8); \
mfspr r10,SPRG7R; \
mfspr r10,SPRN_SPRG7R; \
stw r10,mcheck_sprg7@l(r8); \
mfspr r10,SPRN_PID; \
stw r10,mcheck_pid@l(r8); \
mfspr r10,SRR0; \
mfspr r10,SPRN_SRR0; \
stw r10,mcheck_srr0@l(r8); \
mfspr r10,SRR1; \
mfspr r10,SPRN_SRR1; \
stw r10,mcheck_srr1@l(r8); \
mfspr r10,CSRR0; \
mfspr r10,SPRN_CSRR0; \
stw r10,mcheck_csrr0@l(r8); \
mfspr r10,CSRR1; \
mfspr r10,SPRN_CSRR1; \
stw r10,mcheck_csrr1@l(r8); \
mfspr r8,SPRG6R; /* SPRG6 used in machine checks */ \
mfspr r8,SPRN_SPRG6R; /* SPRG6 used in machine checks */ \
mfcr r10; /* save CR in r10 for now */\
mfspr r11,SPRN_MCSRR1; /* check whether user or kernel */\
andi. r11,r11,MSR_PR; \
......@@ -148,7 +148,7 @@
ori r11,r11,mcheck_stack_top@l; \
beq 1f; \
/* COMING FROM USER MODE */ \
mfspr r11,SPRG3; /* if from user, start at top of */\
mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,THREAD_SIZE; \
1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
......@@ -161,9 +161,9 @@
stw r12,_DEAR(r11); /* since they may have had stuff */\
mfspr r9,SPRN_ESR; /* in them at the point where the */\
stw r9,_ESR(r11); /* exception was taken */\
mfspr r12,MCSRR0; \
mfspr r12,SPRN_MCSRR0; \
stw r1,GPR1(r11); \
mfspr r9,MCSRR1; \
mfspr r9,SPRN_MCSRR1; \
stw r1,0(r11); \
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
......@@ -285,15 +285,15 @@
lwz r0,GPR0(r11); \
lwz r1,GPR1(r11); \
mtcrf 0x80,r10; \
mtspr CSRR0,r12; \
mtspr CSRR1,r9; \
mtspr SPRN_CSRR0,r12; \
mtspr SPRN_CSRR1,r9; \
lwz r9,GPR9(r11); \
lwz r12,GPR12(r11); \
mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \
mtspr SPRN_SPRG2,r8; /* SPRG2 only used in criticals */ \
lis r8,crit_save@ha; \
lwz r10,crit_r10@l(r8); \
lwz r11,crit_r11@l(r8); \
mfspr r8,SPRG2; \
mfspr r8,SPRN_SPRG2; \
\
rfci; \
b .; \
......
......@@ -188,8 +188,8 @@ skpinv: addi r6,r6,1 /* Increment */
1: mflr r9
rlwimi r7,r9,0,20,31
addi r7,r7,24
mtspr SRR0,r7
mtspr SRR1,r6
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r6
rfi
/* 4. Clear out PIDs & Search info */
......@@ -236,8 +236,8 @@ skpinv: addi r6,r6,1 /* Increment */
1: mflr r9
rlwimi r6,r9,0,20,31
addi r6,r6,24
mtspr SRR0,r6
mtspr SRR1,r7
mtspr SPRN_SRR0,r6
mtspr SPRN_SRR1,r7
rfi /* start execution out of TLB1[0] entry */
/* 8. Clear out the temp mapping */
......@@ -302,7 +302,7 @@ skpinv: addi r6,r6,1 /* Increment */
/* ptr to current thread */
addi r4,r2,THREAD /* init task's THREAD */
mtspr SPRG3,r4
mtspr SPRN_SPRG3,r4
/* stack */
lis r1,init_thread_union@h
......@@ -342,8 +342,8 @@ skpinv: addi r6,r6,1 /* Increment */
ori r4,r4,start_kernel@l
lis r3,MSR_KERNEL@h
ori r3,r3,MSR_KERNEL@l
mtspr SRR0,r4
mtspr SRR1,r3
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
rfi /* change context and jump to start_kernel */
/*
......@@ -372,12 +372,12 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mtspr SPRN_SPRG7W, r11
/*
* Check if it was a store fault, if not then bail
......@@ -401,7 +401,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
......@@ -442,12 +442,12 @@ interrupt_base:
tlbwe
/* Done...restore registers and get out of here. */
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
rfi /* Force context change */
2:
......@@ -455,12 +455,12 @@ interrupt_base:
* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b data_access
/* Instruction Storage Interrupt */
......@@ -499,12 +499,12 @@ interrupt_base:
/* Data TLB Error Interrupt */
START_EXCEPTION(DataTLBError)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
......@@ -525,7 +525,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
......@@ -548,12 +548,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b data_access
/* Instruction TLB Error Interrupt */
......@@ -563,13 +563,13 @@ interrupt_base:
* to a different point.
*/
START_EXCEPTION(InstructionTLBError)
mtspr SPRG0, r10 /* Save some working registers */
mtspr SPRG1, r11
mtspr SPRG4W, r12
mtspr SPRG5W, r13
mtspr SPRN_SPRG0, r10 /* Save some working registers */
mtspr SPRN_SPRG1, r11
mtspr SPRN_SPRG4W, r12
mtspr SPRN_SPRG5W, r13
mfcr r11
mtspr SPRG7W, r11
mfspr r10, SRR0 /* Get faulting address */
mtspr SPRN_SPRG7W, r11
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
......@@ -589,7 +589,7 @@ interrupt_base:
/* Get the PGD for the current thread */
3:
mfspr r11,SPRG3
mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
4:
......@@ -613,12 +613,12 @@ interrupt_base:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
b InstructionStorage
#ifdef CONFIG_SPE
......@@ -713,12 +713,12 @@ finish_tlb_load:
tlbwe
/* Done...restore registers and get out of here. */
mfspr r11, SPRG7R
mfspr r11, SPRN_SPRG7R
mtcr r11
mfspr r13, SPRG5R
mfspr r12, SPRG4R
mfspr r11, SPRG1
mfspr r10, SPRG0
mfspr r13, SPRN_SPRG5R
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
rfi /* Force context change */
#ifdef CONFIG_SPE
......@@ -762,7 +762,7 @@ load_up_spe:
#endif /* CONFIG_SMP */
/* enable use of SPE after return */
oris r9,r9,MSR_SPE@h
mfspr r5,SPRG3 /* current task's THREAD (phys) */
mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
li r4,1
li r10,THREAD_ACC
stw r4,THREAD_USED_SPE(r5)
......@@ -781,8 +781,8 @@ load_up_spe:
lwz r10,_LINK(r11)
mtlr r10
REST_GPR(10, r11)
mtspr SRR1,r9
mtspr SRR0,r12
mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
lwz r11,GPR11(r11)
......
......@@ -125,14 +125,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* DPM can possibly interfere with the state machine in the processor
* that invalidates the L2 cache tags.
*/
mfspr r8,HID0 /* Save HID0 in r8 */
mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
mtspr HID0,r4 /* Disable DPM */
mtspr SPRN_HID0,r4 /* Disable DPM */
sync
/* Get the current enable bit of the L2CR into r4 */
mfspr r4,L2CR
mfspr r4,SPRN_L2CR
/* Tweak some bits */
rlwinm r5,r3,0,0,0 /* r5 contains the new enable bit */
......@@ -186,7 +186,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
.balign L1_CACHE_LINE_SIZE
22:
sync
mtspr L2CR,r3
mtspr SPRN_L2CR,r3
sync
b 23f
20:
......@@ -199,27 +199,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Perform a global invalidation */
oris r3,r3,0x0020
sync
mtspr L2CR,r3
mtspr SPRN_L2CR,r3
sync
isync /* For errata */
BEGIN_FTR_SECTION
/* On the 7450, we wait for the L2I bit to clear......
*/
10: mfspr r3,L2CR
10: mfspr r3,SPRN_L2CR
andis. r4,r3,0x0020
bne 10b
b 11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/* Wait for the invalidation to complete */
3: mfspr r3,L2CR
3: mfspr r3,SPRN_L2CR
rlwinm. r4,r3,0,31,31
bne 3b
11: rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */
sync
mtspr L2CR,r3
mtspr SPRN_L2CR,r3
sync
/* See if we need to enable the cache */
......@@ -228,7 +228,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/* Enable the cache */
oris r3,r3,0x8000
mtspr L2CR,r3
mtspr SPRN_L2CR,r3
sync
4:
......@@ -250,7 +250,7 @@ _GLOBAL(_get_L2CR)
/* Return the L2CR contents */
li r3,0
BEGIN_FTR_SECTION
mfspr r3,L2CR
mfspr r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
blr
......
......@@ -216,10 +216,10 @@ _GLOBAL(low_choose_750fx_pll)
/* If switching to PLL1, disable HID0:BTIC */
cmplwi cr0,r3,0
beq 1f
mfspr r5,HID0
mfspr r5,SPRN_HID0
rlwinm r5,r5,0,27,25
sync
mtspr HID0,r5
mtspr SPRN_HID0,r5
isync
sync
......@@ -241,10 +241,10 @@ _GLOBAL(low_choose_750fx_pll)
/* If switching to PLL0, enable HID0:BTIC */
cmplwi cr0,r3,0
bne 1f
mfspr r5,HID0
mfspr r5,SPRN_HID0
ori r5,r5,HID0_BTIC
sync
mtspr HID0,r5
mtspr SPRN_HID0,r5
isync
sync
......@@ -579,7 +579,7 @@ _GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
isync
lis r5, IDC_INVALL@h
mtspr IC_CST, r5
mtspr SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
li r3, 512
......@@ -597,14 +597,14 @@ _GLOBAL(flush_instruction_cache)
ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
mtspr SPRN_L1CSR1,r3
#else
mfspr r3,PVR
mfspr r3,SPRN_PVR
rlwinm r3,r3,16,16,31
cmpwi 0,r3,1
beqlr /* for 601, do nothing */
/* 603/604 processor - use invalidate-all bit in HID0 */
mfspr r3,HID0
mfspr r3,SPRN_HID0
ori r3,r3,HID0_ICFI
mtspr HID0,r3
mtspr SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
isync
blr
......
......@@ -179,7 +179,7 @@ int show_cpuinfo(struct seq_file *m, void *v)
pvr = cpu_data[i].pvr;
lpj = cpu_data[i].loops_per_jiffy;
#else
pvr = mfspr(PVR);
pvr = mfspr(SPRN_PVR);
lpj = loops_per_jiffy;
#endif
......
......@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
*/
void smp_send_tlb_invalidate(int cpu)
{
if ( PVR_VER(mfspr(PVR)) == 8 )
if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0);
}
......@@ -283,7 +283,7 @@ static void __devinit smp_store_cpu_info(int id)
/* assume bogomips are same for everything */
c->loops_per_jiffy = loops_per_jiffy;
c->pvr = mfspr(PVR);
c->pvr = mfspr(SPRN_PVR);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
......
......@@ -486,7 +486,7 @@ static int emulate_instruction(struct pt_regs *regs)
*/
if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
rd = (instword >> 21) & 0x1f;
regs->gpr[rd] = mfspr(PVR);
regs->gpr[rd] = mfspr(SPRN_PVR);
return 0;
}
......
......@@ -101,7 +101,7 @@ _GLOBAL(hash_page)
/* Get PTE (linux-style) and check access */
lis r0,KERNELBASE@h /* check if kernel address */
cmplw 0,r4,r0
mfspr r8,SPRG3 /* current task's THREAD (phys) */
mfspr r8,SPRN_SPRG3 /* current task's THREAD (phys) */
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
......
......@@ -142,7 +142,7 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
flags |= _PAGE_COHERENT;
bl = (size >> 17) - 1;
if (PVR_VER(mfspr(PVR)) != 1) {
if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
/* 603, 604, etc. */
/* Do DBAT first */
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
......
......@@ -97,7 +97,7 @@ ebony_calibrate_decr(void)
* on Rev. C silicon then errata forces us to
* use the internal clock.
*/
switch (PVR_REV(mfspr(PVR))) {
switch (PVR_REV(mfspr(SPRN_PVR))) {
case PVR_REV(PVR_440GP_RB):
freq = EBONY_440GP_RB_SYSCLK;
break;
......
......@@ -140,8 +140,8 @@ mpc834x_sys_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
pvid = mfspr(PVR);
svid = mfspr(SVR);
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Freescale Inc.\n");
......@@ -154,7 +154,7 @@ mpc834x_sys_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(HID1);
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
......@@ -193,8 +193,8 @@ mpc834x_sys_set_bat(void)
{
/* we steal the lowest ioremap addr for virt space */
mb();
mtspr(DBAT1U, VIRT_IMMRBAR | 0x1e);
mtspr(DBAT1L, immrbar | 0x2a);
mtspr(SPRN_DBAT1U, VIRT_IMMRBAR | 0x1e);
mtspr(SPRN_DBAT1L, immrbar | 0x2a);
mb();
}
......@@ -257,7 +257,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
}
#endif
identify_ppc_sys_by_id(mfspr(SVR));
identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc834x_sys_setup_arch;
......
......@@ -187,7 +187,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
identify_ppc_sys_by_id(mfspr(SVR));
identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc8540ads_setup_arch;
......
......@@ -197,7 +197,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
identify_ppc_sys_by_id(mfspr(SVR));
identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc8560ads_setup_arch;
......
......@@ -126,8 +126,8 @@ mpc85xx_ads_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
pvid = mfspr(PVR);
svid = mfspr(SVR);
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
......@@ -137,7 +137,7 @@ mpc85xx_ads_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(HID1);
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
......
......@@ -143,8 +143,8 @@ mpc85xx_cds_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
pvid = mfspr(PVR);
svid = mfspr(SVR);
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
......@@ -154,7 +154,7 @@ mpc85xx_cds_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(HID1);
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
......@@ -448,7 +448,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
identify_ppc_sys_by_id(mfspr(SVR));
identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = mpc85xx_cds_setup_arch;
......
......@@ -198,7 +198,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
identify_ppc_sys_by_id(mfspr(SVR));
identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = sbc8560_setup_arch;
......
......@@ -126,8 +126,8 @@ sbc8560_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
pvid = mfspr(PVR);
svid = mfspr(SVR);
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
seq_printf(m, "chip\t\t: MPC%s\n", cur_ppc_sys_spec->ppc_sys_name);
seq_printf(m, "Vendor\t\t: Wind River\n");
......@@ -137,7 +137,7 @@ sbc8560_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(HID1);
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
......
......@@ -263,8 +263,8 @@ gp3_show_cpuinfo(struct seq_file *m)
/* get the core frequency */
freq = binfo->bi_intfreq;
pvid = mfspr(PVR);
svid = mfspr(SVR);
pvid = mfspr(SPRN_PVR);
svid = mfspr(SPRN_SVR);
memsize = total_memory;
......@@ -277,7 +277,7 @@ gp3_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
/* Display cpu Pll setting */
phid1 = mfspr(HID1);
phid1 = mfspr(SPRN_HID1);
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
/* Display the amount of memory */
......@@ -349,7 +349,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
strcpy(cmd_line, (char *) (r6 + KERNELBASE));
}
identify_ppc_sys_by_id(mfspr(SVR));
identify_ppc_sys_by_id(mfspr(SPRN_SVR));
/* setup the PowerPC module struct */
ppc_md.setup_arch = gp3_setup_arch;
......
......@@ -60,7 +60,7 @@ adir_get_cpu_speed(void)
unsigned long hid1;
int cpu_speed;
hid1 = mfspr(HID1) >> 28;
hid1 = mfspr(SPRN_HID1) >> 28;
hid1 = cpu_750cx[hid1];
......@@ -126,7 +126,7 @@ adir_setup_arch(void)
printk("SBS Adirondack port (C) 2001 SBS Technologies, Inc.\n");
/* Identify the CPU manufacturer */
cpu = mfspr(PVR);
cpu = mfspr(SPRN_PVR);
printk("CPU manufacturer: IBM [rev=%04x]\n", (cpu & 0xffff));
}
......
......@@ -513,8 +513,8 @@ static __inline__ void
chestnut_set_bat(void)
{
mb();
mtspr(DBAT3U, 0xf0001ffe);
mtspr(DBAT3L, 0xf000002a);
mtspr(SPRN_DBAT3U, 0xf0001ffe);
mtspr(SPRN_DBAT3L, 0xf000002a);
mb();
}
......
......@@ -89,7 +89,7 @@ cpci690_get_cpu_speed(void)
{
unsigned long hid1;
hid1 = mfspr(HID1) >> 28;
hid1 = mfspr(SPRN_HID1) >> 28;
return cpci690_get_bus_speed() * cpu_7xx[hid1]/2;
}
......@@ -441,8 +441,8 @@ cpci690_set_bat(u32 addr, u32 size)
size = ((size >> 17) - 1) << 2;
mb();
mtspr(DBAT1U, addr | size | 0x2); /* Vs == 1; Vp == 0 */
mtspr(DBAT1L, addr | 0x2a); /* WIMG bits == 0101; PP == r/w access */
mtspr(SPRN_DBAT1U, addr | size | 0x2); /* Vs == 1; Vp == 0 */
mtspr(SPRN_DBAT1L, addr | 0x2a); /* WIMG bits == 0101; PP == r/w access */
mb();
return;
......
......@@ -80,14 +80,14 @@ ev64260_get_cpu_speed(void)
{
unsigned long pvr, hid1, pll_ext;
pvr = PVR_VER(mfspr(PVR));
pvr = SPRN_VER(mfspr(SPRN_PVR));
if (pvr != PVR_VER(PVR_7450)) {
hid1 = mfspr(HID1) >> 28;
hid1 = mfspr(SPRN_HID1) >> 28;
return ev64260_get_bus_speed() * cpu_7xx[hid1]/2;
}
else {
hid1 = (mfspr(HID1) & 0x0001e000) >> 13;
hid1 = (mfspr(SPRN_HID1) & 0x0001e000) >> 13;
pll_ext = 0; /* No way to read; must get from schematic */
return ev64260_get_bus_speed() * cpu_745x[pll_ext][hid1]/2;
}
......@@ -530,7 +530,7 @@ ev64260_show_cpuinfo(struct seq_file *m)
{
uint pvid;
pvid = mfspr(PVR);
pvid = mfspr(SPRN_PVR);
seq_printf(m, "vendor\t\t: " BOARD_VENDOR "\n");
seq_printf(m, "machine\t\t: " BOARD_MACHINE "\n");
seq_printf(m, "cpu MHz\t\t: %d\n", ev64260_get_cpu_speed()/1000/1000);
......@@ -563,8 +563,8 @@ static __inline__ void
ev64260_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xfb0001fe);
mtspr(DBAT1L, 0xfb00002a);
mtspr(SPRN_DBAT1U, 0xfb0001fe);
mtspr(SPRN_DBAT1L, 0xfb00002a);
mb();
return;
......
......@@ -40,29 +40,29 @@ _GLOBAL(gemini_prom_init)
/* zero out the bats now that the MMU is off */
prom_no_mmu:
li r3,0
mtspr IBAT0U,r3
mtspr IBAT0L,r3
mtspr IBAT1U,r3
mtspr IBAT1L,r3
mtspr IBAT2U,r3
mtspr IBAT2L,r3
mtspr IBAT3U,r3
mtspr IBAT3L,r3
mtspr SPRN_IBAT0U,r3
mtspr SPRN_IBAT0L,r3
mtspr SPRN_IBAT1U,r3
mtspr SPRN_IBAT1L,r3
mtspr SPRN_IBAT2U,r3
mtspr SPRN_IBAT2L,r3
mtspr SPRN_IBAT3U,r3
mtspr SPRN_IBAT3L,r3
mtspr DBAT0U,r3
mtspr DBAT0L,r3
mtspr DBAT1U,r3
mtspr DBAT1L,r3
mtspr DBAT2U,r3
mtspr DBAT2L,r3
mtspr DBAT3U,r3
mtspr DBAT3L,r3
mtspr SPRN_DBAT0U,r3
mtspr SPRN_DBAT0L,r3
mtspr SPRN_DBAT1U,r3
mtspr SPRN_DBAT1L,r3
mtspr SPRN_DBAT2U,r3
mtspr SPRN_DBAT2L,r3
mtspr SPRN_DBAT3U,r3
mtspr SPRN_DBAT3L,r3
#endif
/* the bootloader (as far as I'm currently aware) doesn't mess with page
tables, but since we're already here, might as well zap these, too */
li r4,0
mtspr SDR1,r4
mtspr SPRN_SDR1,r4
li r4,16
mtctr r4
......@@ -75,9 +75,9 @@ prom_no_mmu:
#ifdef CONFIG_SMP
/* The 750 book (and Mot/IBM support) says that this will "assist" snooping
when in SMP. Not sure yet whether this should stay or leave... */
mfspr r4,HID0
mfspr r4,SPRN_HID0
ori r4,r4,HID0_ABE
mtspr HID0,r4
mtspr SPRN_HID0,r4
sync
#endif /* CONFIG_SMP */
blr
......@@ -88,6 +88,6 @@ _GLOBAL(_gemini_reboot)
lis r5,GEMINI_BOOT_INIT@h
ori r5,r5,GEMINI_BOOT_INIT@l
li r6,MSR_IP
mtspr SRR0,r5
mtspr SRR1,r6
mtspr SPRN_SRR0,r5
mtspr SPRN_SRR1,r6
rfi
......@@ -202,8 +202,8 @@ gemini_get_clock_speed(void)
unsigned long hid1, pvr;
int clock;
pvr = mfspr(PVR);
hid1 = (mfspr(HID1) >> 28) & 0xf;
pvr = mfspr(SPRN_PVR);
hid1 = (mfspr(SPRN_HID1) >> 28) & 0xf;
if (PVR_VER(pvr) == 8 ||
PVR_VER(pvr) == 12)
hid1 = cpu_7xx[hid1];
......@@ -238,7 +238,7 @@ void __init gemini_init_l2(void)
reg = readb(GEMINI_L2CFG);
brev = readb(GEMINI_BREV);
fam = readb(GEMINI_FEAT);
pvr = mfspr(PVR);
pvr = mfspr(SPRN_PVR);
switch(PVR_VER(pvr)) {
......@@ -537,8 +537,8 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
int i;
/* Restore BATs for now */
mtspr(DBAT3U, 0xf0001fff);
mtspr(DBAT3L, 0xf000002a);
mtspr(SPRN_DBAT3U, 0xf0001fff);
mtspr(SPRN_DBAT3L, 0xf000002a);
parse_bootinfo(find_bootinfo());
......
......@@ -392,9 +392,9 @@ static int k2_get_cpu_speed(void)
unsigned long hid1;
int cpu_speed;
hid1 = mfspr(HID1) >> 28;
hid1 = mfspr(SPRN_HID1) >> 28;
if ((mfspr(PVR) >> 16) == 8)
if ((mfspr(SPRN_PVR) >> 16) == 8)
hid1 = cpu_7xx[hid1];
else
hid1 = cpu_6xx[hid1];
......@@ -472,7 +472,7 @@ static void __init k2_setup_arch(void)
"(source@mvista.com)\n");
/* Identify the CPU manufacturer */
cpu = PVR_REV(mfspr(PVR));
cpu = PVR_REV(mfspr(SPRN_PVR));
printk(KERN_INFO "CPU manufacturer: %s [rev=%04x]\n",
(cpu & (1 << 15)) ? "IBM" : "Motorola", cpu);
}
......@@ -486,8 +486,8 @@ static void k2_restart(char *cmd)
/* SRR0 has system reset vector, SRR1 has default MSR value */
/* rfi restores MSR from SRR1 and sets the PC to the SRR0 value */
mtspr(SRR0, 0xfff00100);
mtspr(SRR1, 0);
mtspr(SPRN_SRR0, 0xfff00100);
mtspr(SPRN_SRR1, 0);
__asm__ __volatile__("rfi\n\t");
/* not reached */
......@@ -513,10 +513,10 @@ static __inline__ void k2_set_bat(void)
mb();
/* setup DBATs */
mtspr(DBAT2U, 0x80001ffe);
mtspr(DBAT2L, 0x8000002a);
mtspr(DBAT3U, 0xf0001ffe);
mtspr(DBAT3L, 0xf000002a);
mtspr(SPRN_DBAT2U, 0x80001ffe);
mtspr(SPRN_DBAT2L, 0x8000002a);
mtspr(SPRN_DBAT3U, 0xf0001ffe);
mtspr(SPRN_DBAT3L, 0xf000002a);
/* wait for updates */
mb();
......
......@@ -452,7 +452,7 @@ katana_setup_arch(void)
* DD2.0 has bug that requires the L2 to be in WRT mode
* avoid dirty data in cache
*/
if (PVR_REV(mfspr(PVR)) == 0x0200) {
if (PVR_REV(mfspr(SPRN_PVR)) == 0x0200) {
printk(KERN_INFO "DD2.0 detected. Setting L2 cache"
"to Writethrough mode\n");
_set_L2CR(L2CR_L2E | L2CR_L2PE | L2CR_L2WT);
......@@ -733,8 +733,8 @@ static inline void
katana_set_bat(void)
{
mb();
mtspr(DBAT2U, 0xf0001ffe);
mtspr(DBAT2L, 0xf000002a);
mtspr(SPRN_DBAT2U, 0xf0001ffe);
mtspr(SPRN_DBAT2L, 0xf000002a);
mb();
}
......
......@@ -319,8 +319,8 @@ static __inline__ void
lopec_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xf8000ffe);
mtspr(DBAT1L, 0xf800002a);
mtspr(SPRN_DBAT1U, 0xf8000ffe);
mtspr(SPRN_DBAT1L, 0xf800002a);
mb();
}
......
......@@ -470,8 +470,8 @@ static __inline__ void
mcpn765_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xfe8000fe);
mtspr(DBAT1L, 0xfe80002a);
mtspr(SPRN_DBAT1U, 0xfe8000fe);
mtspr(SPRN_DBAT1L, 0xfe80002a);
mb();
}
......
......@@ -246,8 +246,8 @@ static __inline__ void
mvme5100_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xf0001ffe);
mtspr(DBAT1L, 0xf000002a);
mtspr(SPRN_DBAT1U, 0xf0001ffe);
mtspr(SPRN_DBAT1L, 0xf000002a);
mb();
}
......
......@@ -282,8 +282,8 @@ static __inline__ void
pcore_set_bat(void)
{
mb();
mtspr(DBAT3U, 0xf0001ffe);
mtspr(DBAT3L, 0xfe80002a);
mtspr(SPRN_DBAT3U, 0xf0001ffe);
mtspr(SPRN_DBAT3L, 0xfe80002a);
mb();
}
......
......@@ -55,7 +55,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Stop DPM */
mfspr r8,SPRN_HID0 /* Save HID0 in r8 */
mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
mtspr SPRN_HID0,r4 /* Disable DPM */
......@@ -86,13 +86,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
/* Get the current enable bit of the L2CR into r4 */
mfspr r5,L2CR
mfspr r5,SPRN_L2CR
/* Set to data-only (pre-745x bit) */
oris r3,r5,L2CR_L2DO@h
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr L2CR,r3
1: mtspr SPRN_L2CR,r3
3: sync
isync
b 1f
......@@ -117,7 +117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr L2CR,r5
1: mtspr SPRN_L2CR,r5
3: sync
isync
b 1f
......@@ -129,18 +129,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
isync
/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
oris r4,r5,L2CR_L2I@h
mtspr L2CR,r4
mtspr SPRN_L2CR,r4
sync
isync
xoris r4,r4,L2CR_L2I@h
sync
mtspr L2CR,r4
mtspr SPRN_L2CR,r4
sync
/* now disable the L1 data cache */
mfspr r0,HID0
mfspr r0,SPRN_HID0
rlwinm r0,r0,0,~HID0_DCE
mtspr HID0,r0
mtspr SPRN_HID0,r0
sync
isync
......@@ -239,14 +239,14 @@ flush_disable_745x:
isync
/* Flush the L2 cache using the hardware assist */
mfspr r3,L2CR
mfspr r3,SPRN_L2CR
cmpwi r3,0 /* check if it is enabled first */
bge 4f
oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
b 2f
/* When disabling/locking L2, code must be in L1 */
.balign 32
1: mtspr L2CR,r0 /* lock the L2 cache */
1: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
3: sync
isync
b 1f
......@@ -258,8 +258,8 @@ flush_disable_745x:
isync
ori r0,r3,L2CR_L2HWF_745x
sync
mtspr L2CR,r0 /* set the hardware flush bit */
3: mfspr r0,L2CR /* wait for it to go to 0 */
mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
3: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
andi. r0,r0,L2CR_L2HWF_745x
bne 3b
sync
......@@ -267,7 +267,7 @@ flush_disable_745x:
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr L2CR,r3 /* disable the L2 cache */
1: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
3: sync
isync
b 1f
......@@ -278,34 +278,34 @@ flush_disable_745x:
1: sync
isync
oris r4,r3,L2CR_L2I@h
mtspr L2CR,r4
mtspr SPRN_L2CR,r4
sync
isync
1: mfspr r4,L2CR
1: mfspr r4,SPRN_L2CR
andis. r0,r4,L2CR_L2I@h
bne 1b
sync
BEGIN_FTR_SECTION
/* Flush the L3 cache using the hardware assist */
4: mfspr r3,L3CR
4: mfspr r3,SPRN_L3CR
cmpwi r3,0 /* check if it is enabled */
bge 6f
oris r0,r3,L3CR_L3IO@h
ori r0,r0,L3CR_L3DO
sync
mtspr L3CR,r0 /* lock the L3 cache */
mtspr SPRN_L3CR,r0 /* lock the L3 cache */
sync
isync
ori r0,r0,L3CR_L3HWF
sync
mtspr L3CR,r0 /* set the hardware flush bit */
5: mfspr r0,L3CR /* wait for it to go to zero */
mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
5: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
andi. r0,r0,L3CR_L3HWF
bne 5b
rlwinm r3,r3,0,~L3CR_L3E
sync
mtspr L3CR,r3 /* disable the L3 cache */
mtspr SPRN_L3CR,r3 /* disable the L3 cache */
sync
ori r4,r3,L3CR_L3I
mtspr SPRN_L3CR,r4
......@@ -315,9 +315,9 @@ BEGIN_FTR_SECTION
sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
6: mfspr r0,HID0 /* now disable the L1 data cache */
6: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
rlwinm r0,r0,0,~HID0_DCE
mtspr HID0,r0
mtspr SPRN_HID0,r0
sync
isync
mtmsr r11 /* restore DR and EE */
......
......@@ -156,7 +156,7 @@ static int __pmac dfs_set_cpu_speed(int low_speed)
static unsigned int __pmac dfs_get_cpu_speed(unsigned int cpu)
{
if (mfspr(HID1) & HID1_DFS)
if (mfspr(SPRN_HID1) & HID1_DFS)
return low_freq;
else
return hi_freq;
......@@ -542,7 +542,7 @@ static int __init pmac_cpufreq_setup(void)
set_speed_proc = pmu_set_cpu_speed;
}
/* Else check for 750FX */
else if (PVR_VER(mfspr(PVR)) == 0x7000) {
else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000) {
if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
goto out;
hi_freq = cur_freq;
......
......@@ -1796,7 +1796,7 @@ core99_sleep_state(struct device_node* node, long param, long value)
if (value == 1) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL,
*reg, 0x05);
} else if (value == 0 && (mfspr(HID1) & HID1_DFS)) {
} else if (value == 0 && (mfspr(SPRN_HID1) & HID1_DFS)) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL,
*reg, 0x04);
}
......
......@@ -247,7 +247,7 @@ pmac_setup_arch(void)
int *fp;
unsigned long pvr;
pvr = PVR_VER(mfspr(PVR));
pvr = PVR_VER(mfspr(SPRN_PVR));
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
......
......@@ -182,12 +182,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/*
* Set the HID0 and MSR for sleep.
*/
mfspr r2,HID0
mfspr r2,SPRN_HID0
rlwinm r2,r2,0,10,7 /* clear doze, nap */
oris r2,r2,HID0_SLEEP@h
sync
isync
mtspr HID0,r2
mtspr SPRN_HID0,r2
sync
/* This loop puts us back to sleep in case we have a spurrious
......@@ -216,10 +216,10 @@ _GLOBAL(core99_wake_up)
/* Make sure HID0 no longer contains any sleep bit and that data cache
* is disabled
*/
mfspr r3,HID0
mfspr r3,SPRN_HID0
rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
rlwinm 3,r3,0,18,15 /* clear DCE, ICE */
mtspr HID0,r3
mtspr SPRN_HID0,r3
sync
isync
......
......@@ -294,7 +294,7 @@ static int __init smp_psurge_probe(void)
int i, ncpus;
/* We don't do SMP on the PPC601 -- paulus */
if (PVR_VER(mfspr(PVR)) == 1)
if (PVR_VER(mfspr(SPRN_PVR)) == 1)
return 1;
/*
......
......@@ -849,10 +849,10 @@ static __inline__ void pplus_set_bat(void)
mb();
/* setup DBATs */
mtspr(DBAT2U, 0x80001ffe);
mtspr(DBAT2L, 0x8000002a);
mtspr(DBAT3U, 0xf0001ffe);
mtspr(DBAT3L, 0xf000002a);
mtspr(SPRN_DBAT2U, 0x80001ffe);
mtspr(SPRN_DBAT2L, 0x8000002a);
mtspr(SPRN_DBAT3U, 0xf0001ffe);
mtspr(SPRN_DBAT3L, 0xf000002a);
/* wait for updates */
mb();
......
......@@ -690,8 +690,8 @@ prep_set_bat(void)
mb();
/* setup DBATs */
mtspr(DBAT2U, 0x80001ffe);
mtspr(DBAT2L, 0x8000002a);
mtspr(SPRN_DBAT2U, 0x80001ffe);
mtspr(SPRN_DBAT2L, 0x8000002a);
/* wait for updates */
mb();
......
......@@ -302,8 +302,8 @@ static void __init prpmc750_init_IRQ(void)
static __inline__ void prpmc750_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xf0001ffe);
mtspr(DBAT1L, 0xf000002a);
mtspr(SPRN_DBAT1U, 0xf0001ffe);
mtspr(SPRN_DBAT1L, 0xf000002a);
mb();
}
......
......@@ -419,8 +419,8 @@ static void __init prpmc800_init_IRQ(void)
static __inline__ void prpmc800_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xf0001ffe);
mtspr(DBAT1L, 0xf000002a);
mtspr(SPRN_DBAT1U, 0xf0001ffe);
mtspr(SPRN_DBAT1L, 0xf000002a);
mb();
}
......
......@@ -278,8 +278,8 @@ static __inline__ void
spruce_set_bat(void)
{
mb();
mtspr(DBAT1U, 0xf8000ffe);
mtspr(DBAT1L, 0xf800002a);
mtspr(SPRN_DBAT1U, 0xf8000ffe);
mtspr(SPRN_DBAT1L, 0xf800002a);
mb();
}
......
......@@ -137,7 +137,7 @@ btext_prepare_BAT(void)
boot_text_mapped = 0;
return;
}
if (PVR_VER(mfspr(PVR)) != 1) {
if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
/* 603, 604, G3, G4, ... */
lowbits = addr & ~0xFF000000UL;
addr &= 0xFF000000UL;
......
......@@ -221,7 +221,7 @@ void __init ibm440gx_l2c_setup(struct ibm44x_clocks* p)
/* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
enable it on all other revisions
*/
u32 pvr = mfspr(PVR);
u32 pvr = mfspr(SPRN_PVR);
if (pvr == PVR_440GX_RA || pvr == PVR_440GX_RB ||
(pvr == PVR_440GX_RC && p->cpu > 667000000))
ibm440gx_l2c_disable();
......
......@@ -80,8 +80,8 @@ mpc52xx_set_bat(void)
* mpc52xx_find_end_of_memory, and UARTs/GPIO access for debug
*/
mb();
mtspr(DBAT2U, 0xf0001ffe);
mtspr(DBAT2L, 0xf000002a);
mtspr(SPRN_DBAT2U, 0xf0001ffe);
mtspr(SPRN_DBAT2L, 0xf000002a);
mb();
}
......
......@@ -50,12 +50,12 @@ extern void flush_dcache_all(void);
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
#define IC_CST 560 /* Instruction cache control/status */
#define IC_ADR 561 /* Address needed for some commands */
#define IC_DAT 562 /* Read-only data register */
#define DC_CST 568 /* Data cache control/status */
#define DC_ADR 569 /* Address needed for some commands */
#define DC_DAT 570 /* Read-only data register */
#define SPRN_IC_CST 560 /* Instruction cache control/status */
#define SPRN_IC_ADR 561 /* Address needed for some commands */
#define SPRN_IC_DAT 562 /* Read-only data register */
#define SPRN_DC_CST 568 /* Data cache control/status */
#define SPRN_DC_ADR 569 /* Address needed for some commands */
#define SPRN_DC_DAT 570 /* Read-only data register */
/* Commands. Only the first few are available to the instruction cache.
*/
......
......@@ -152,7 +152,7 @@ typedef struct _P601_BAT {
* is written, and the contents of several registers are used to
* create the entry.
*/
#define MI_CTR 784 /* Instruction TLB control register */
#define SPRN_MI_CTR 784 /* Instruction TLB control register */
#define MI_GPM 0x80000000 /* Set domain manager mode */
#define MI_PPM 0x40000000 /* Set subpage protection */
#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
......@@ -164,7 +164,7 @@ typedef struct _P601_BAT {
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
*/
#define MI_AP 786
#define SPRN_MI_AP 786
#define MI_Ks 0x80000000 /* Should not be set */
#define MI_Kp 0x40000000 /* Should always be set */
......@@ -172,7 +172,7 @@ typedef struct _P601_BAT {
* about the last instruction TLB miss. When MI_RPN is written, bits in
* this register are used to create the TLB entry.
*/
#define MI_EPN 787
#define SPRN_MI_EPN 787
#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MI_EVALID 0x00000200 /* Entry is valid */
#define MI_ASIDMASK 0x0000000f /* ASID match value */
......@@ -182,7 +182,7 @@ typedef struct _P601_BAT {
* For the instruction TLB, it contains bits that get loaded into the
* TLB entry when the MI_RPN is written.
*/
#define MI_TWC 789
#define SPRN_MI_TWC 789
#define MI_APG 0x000001e0 /* Access protection group (0) */
#define MI_GUARDED 0x00000010 /* Guarded storage */
#define MI_PSMASK 0x0000000c /* Mask of page size bits */
......@@ -196,7 +196,7 @@ typedef struct _P601_BAT {
* causes a TLB entry to be created for the instruction TLB, using
* additional information from the MI_EPN, and MI_TWC registers.
*/
#define MI_RPN 790
#define SPRN_MI_RPN 790
/* Define an RPN value for mapping kernel memory to large virtual
* pages for boot initialization. This has real page number of 0,
......@@ -205,7 +205,7 @@ typedef struct _P601_BAT {
*/
#define MI_BOOTINIT 0x000001fd
#define MD_CTR 792 /* Data TLB control register */
#define SPRN_MD_CTR 792 /* Data TLB control register */
#define MD_GPM 0x80000000 /* Set domain manager mode */
#define MD_PPM 0x40000000 /* Set subpage protection */
#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
......@@ -216,14 +216,14 @@ typedef struct _P601_BAT {
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
#define MD_RESETVAL 0x04000000 /* Value of register at reset */
#define M_CASID 793 /* Address space ID (context) to match */
#define SPRN_M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
*/
#define MD_AP 794
#define SPRN_MD_AP 794
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
......@@ -231,7 +231,7 @@ typedef struct _P601_BAT {
* about the last instruction TLB miss. When MD_RPN is written, bits in
* this register are used to create the TLB entry.
*/
#define MD_EPN 795
#define SPRN_MD_EPN 795
#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MD_EVALID 0x00000200 /* Entry is valid */
#define MD_ASIDMASK 0x0000000f /* ASID match value */
......@@ -241,7 +241,7 @@ typedef struct _P601_BAT {
* During a software tablewalk, reading this register provides the address
* of the entry associated with MD_EPN.
*/
#define M_TWB 796
#define SPRN_M_TWB 796
#define M_L1TB 0xfffff000 /* Level 1 table base address */
#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
/* Reset value is undefined */
......@@ -251,7 +251,7 @@ typedef struct _P601_BAT {
* when the MD_RPN is written. It is also provides the hardware assist
* for finding the PTE address during software tablewalk.
*/
#define MD_TWC 797
#define SPRN_MD_TWC 797
#define MD_L2TB 0xfffff000 /* Level 2 table base address */
#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
#define MD_APG 0x000001e0 /* Access protection group (0) */
......@@ -269,12 +269,12 @@ typedef struct _P601_BAT {
* causes a TLB entry to be created for the data TLB, using
* additional information from the MD_EPN, and MD_TWC registers.
*/
#define MD_RPN 798
#define SPRN_MD_RPN 798
/* This is a temporary storage register that could be used to save
* a processor working register during a tablewalk.
*/
#define M_TW 799
#define SPRN_M_TW 799
/*
* At present, all PowerPC 400-class processors share a similar TLB
......
......@@ -335,91 +335,6 @@
#define MMCR0_PMC2_LOADMISSTIME 0x5
#define MMCR0_PMXE (1 << 26)
/* Short-hand versions for a number of the above SPRNs */
#define CTR SPRN_CTR /* Counter Register */
#define DAR SPRN_DAR /* Data Address Register */
#define DABR SPRN_DABR /* Data Address Breakpoint Register */
#define DBAT0L SPRN_DBAT0L /* Data BAT 0 Lower Register */
#define DBAT0U SPRN_DBAT0U /* Data BAT 0 Upper Register */
#define DBAT1L SPRN_DBAT1L /* Data BAT 1 Lower Register */
#define DBAT1U SPRN_DBAT1U /* Data BAT 1 Upper Register */
#define DBAT2L SPRN_DBAT2L /* Data BAT 2 Lower Register */
#define DBAT2U SPRN_DBAT2U /* Data BAT 2 Upper Register */
#define DBAT3L SPRN_DBAT3L /* Data BAT 3 Lower Register */
#define DBAT3U SPRN_DBAT3U /* Data BAT 3 Upper Register */
#define DBAT4L SPRN_DBAT4L /* Data BAT 4 Lower Register */
#define DBAT4U SPRN_DBAT4U /* Data BAT 4 Upper Register */
#define DBAT5L SPRN_DBAT5L /* Data BAT 5 Lower Register */
#define DBAT5U SPRN_DBAT5U /* Data BAT 5 Upper Register */
#define DBAT6L SPRN_DBAT6L /* Data BAT 6 Lower Register */
#define DBAT6U SPRN_DBAT6U /* Data BAT 6 Upper Register */
#define DBAT7L SPRN_DBAT7L /* Data BAT 7 Lower Register */
#define DBAT7U SPRN_DBAT7U /* Data BAT 7 Upper Register */
//#define DEC SPRN_DEC /* Decrement Register */
#define DMISS SPRN_DMISS /* Data TLB Miss Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define EAR SPRN_EAR /* External Address Register */
#define HASH1 SPRN_HASH1 /* Primary Hash Address Register */
#define HASH2 SPRN_HASH2 /* Secondary Hash Address Register */
#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
#define HID1 SPRN_HID1 /* Hardware Implementation Register 1 */
#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
#define IBAT0L SPRN_IBAT0L /* Instruction BAT 0 Lower Register */
#define IBAT0U SPRN_IBAT0U /* Instruction BAT 0 Upper Register */
#define IBAT1L SPRN_IBAT1L /* Instruction BAT 1 Lower Register */
#define IBAT1U SPRN_IBAT1U /* Instruction BAT 1 Upper Register */
#define IBAT2L SPRN_IBAT2L /* Instruction BAT 2 Lower Register */
#define IBAT2U SPRN_IBAT2U /* Instruction BAT 2 Upper Register */
#define IBAT3L SPRN_IBAT3L /* Instruction BAT 3 Lower Register */
#define IBAT3U SPRN_IBAT3U /* Instruction BAT 3 Upper Register */
#define IBAT4L SPRN_IBAT4L /* Instruction BAT 4 Lower Register */
#define IBAT4U SPRN_IBAT4U /* Instruction BAT 4 Upper Register */
#define IBAT5L SPRN_IBAT5L /* Instruction BAT 5 Lower Register */
#define IBAT5U SPRN_IBAT5U /* Instruction BAT 5 Upper Register */
#define IBAT6L SPRN_IBAT6L /* Instruction BAT 6 Lower Register */
#define IBAT6U SPRN_IBAT6U /* Instruction BAT 6 Upper Register */
#define IBAT7L SPRN_IBAT7L /* Instruction BAT 7 Lower Register */
#define IBAT7U SPRN_IBAT7U /* Instruction BAT 7 Upper Register */
#define ICMP SPRN_ICMP /* Instruction TLB Compare Register */
#define IMISS SPRN_IMISS /* Instruction TLB Miss Register */
#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */
#define L2CR SPRN_L2CR /* Classic PPC L2 cache control register */
#define L3CR SPRN_L3CR /* PPC 745x L3 cache control register */
//#define LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
//#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SPR2 SPRN_SPRG2
#define SPR3 SPRN_SPRG3
#define SPR4 SPRN_SPRG4
#define SPR5 SPRN_SPRG5
#define SPR6 SPRN_SPRG6
#define SPR7 SPRN_SPRG7
#define SPRG0 SPRN_SPRG0
#define SPRG1 SPRN_SPRG1
#define SPRG2 SPRN_SPRG2
#define SPRG3 SPRN_SPRG3
#define SPRG4 SPRN_SPRG4
#define SPRG5 SPRN_SPRG5
#define SPRG6 SPRN_SPRG6
#define SPRG7 SPRN_SPRG7
#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
#define SRR2 SPRN_SRR2 /* Save and Restore Register 2 */
#define SRR3 SPRN_SRR3 /* Save and Restore Register 3 */
#define SVR SPRN_SVR /* System Version Register */
#define ICTC SPRN_ICTC /* Instruction Cache Throttling Control Reg */
#define THRM1 SPRN_THRM1 /* Thermal Management Register 1 */
#define THRM2 SPRN_THRM2 /* Thermal Management Register 2 */
#define THRM3 SPRN_THRM3 /* Thermal Management Register 3 */
#define XER SPRN_XER
#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
/* Processor Version Register */
/* Processor Version Register (PVR) field extraction */
......
......@@ -427,26 +427,6 @@ do { \
#define SPEFSCR_FOVFE 0x00000004 /* Embedded FP overflow enable */
#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
/* Short-hand for various SPRs. */
#ifdef CONFIG_BOOKE
#define CSRR0 SPRN_CSRR0 /* Critical Save and Restore Register 0 */
#define CSRR1 SPRN_CSRR1 /* Critical Save and Restore Register 1 */
#else
#define CSRR0 SPRN_SRR2 /* Logically and functionally equivalent. */
#define CSRR1 SPRN_SRR3 /* Logically and functionally equivalent. */
#endif
#define MCSRR0 SPRN_MCSRR0 /* Machine Check Save and Restore Register 0 */
#define MCSRR1 SPRN_MCSRR1 /* Machine Check Save and Restore Register 1 */
#define DCMP SPRN_DCMP /* Data TLB Compare Register */
#define SPRG4R SPRN_SPRG4R /* Supervisor Private Registers */
#define SPRG5R SPRN_SPRG5R
#define SPRG6R SPRN_SPRG6R
#define SPRG7R SPRN_SPRG7R
#define SPRG4W SPRN_SPRG4W
#define SPRG5W SPRN_SPRG5W
#define SPRG6W SPRN_SPRG6W
#define SPRG7W SPRN_SPRG7W
/*
* The IBM-403 is an even more odd special case, as it is much
* older than the IBM-405 series. We put these down here incase someone
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment