Commit c75df6f9 authored by Michael Neuling, committed by Benjamin Herrenschmidt

powerpc: Fix usage of register macros getting ready for %r0 change

Anything that uses a constructed instruction (i.e. from ppc-opcode.h) needs to use the new R0 macro, as %r0 is not going to work.

Also convert usages of macros where we are just determining an offset
(usually for a load/store), like:
	std	r14,STK_REG(r14)(r1)
STK_REG(r14) can't be used here, as %r14 doesn't work in the STK_REG macro,
which is just calculating an offset.
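
As an illustration only (this commit does not add these definitions; the real ones live in the powerpc asm headers, and the exact bodies below are a simplified sketch), the reason the upper-case macros survive the %r0 change is that they stay plain numbers:

	/* Sketch, not the exact kernel definitions. */
	#define R14		14				/* plain number, usable in arithmetic */
	#define __PPC_RT(t)	(((t) & 0x1f) << 21)		/* builds the RT field of an instruction */
	#define STK_REG(i)	(112 + ((i) - 14) * 8)		/* stack-save offset for GPR i */

	/* Once r14 expands to the assembler register name %r14 rather than the
	 * bare number 14, STK_REG(r14) or __PPC_RT(r14) would turn into
	 * arithmetic on "%r14", which the assembler rejects, while
	 * STK_REG(R14) and __PPC_RT(R14) keep working.
	 */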
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 564aa5cf
@@ -100,19 +100,19 @@ _icswx_skip_guest:
  lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
  mtspr SPRN_MMUCR0, r4
  li r4,A2_IERAT_SIZE-1
- PPC_ERATWE(r4,r4,3)
+ PPC_ERATWE(R4,R4,3)
  /* Now set the D-ERAT watermark to 31 */
  lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
  mtspr SPRN_MMUCR0, r4
  li r4,A2_DERAT_SIZE-1
- PPC_ERATWE(r4,r4,3)
+ PPC_ERATWE(R4,R4,3)
  /* And invalidate the beast just in case. That won't get rid of
  * a bolted entry though it will be in LRU and so will go away eventually
  * but let's not bother for now
  */
- PPC_ERATILX(0,0,0)
+ PPC_ERATILX(0,R0,R0)
  1:
  blr
...
@@ -106,7 +106,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  #endif
  lfd fr0,THREAD_FPSCR(r5)
  MTFSF_L(fr0)
- REST_32FPVSRS(0, r4, r5)
+ REST_32FPVSRS(0, R4, R5)
  #ifndef CONFIG_SMP
  subi r4,r5,THREAD
  fromreal(r4)
@@ -140,7 +140,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
  addi r3,r3,THREAD /* want THREAD of task */
  PPC_LL r5,PT_REGS(r3)
  PPC_LCMPI 0,r5,0
- SAVE_32FPVSRS(0, r4 ,r3)
+ SAVE_32FPVSRS(0, R4 ,R3)
  mffs fr0
  stfd fr0,THREAD_FPSCR(r3)
  beq 1f
...
@@ -302,7 +302,7 @@ static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
  if (imm_one) {
  p[kvm_emulate_wrtee_reg_offs] =
- KVM_INST_LI | __PPC_RT(30) | MSR_EE;
+ KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
  } else {
  /* Make clobbered registers work too */
  switch (get_rt(rt)) {
...
@@ -314,7 +314,7 @@ _GLOBAL(real_205_readb)
  mtmsrd r0
  sync
  isync
- LBZCIX(r3,0,r3)
+ LBZCIX(R3,0,R3)
  isync
  mtmsrd r7
  sync
@@ -329,7 +329,7 @@ _GLOBAL(real_205_writeb)
  mtmsrd r0
  sync
  isync
- STBCIX(r3,0,r4)
+ STBCIX(R3,0,R4)
  isync
  mtmsrd r7
  sync
...
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  /* Load up FP, VMX and VSX registers */
  bl kvmppc_load_fp
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
  BEGIN_FTR_SECTION
  /* Switch DSCR to guest value */
@@ -547,21 +547,21 @@ fast_guest_return:
  mtlr r5
  mtcr r6
- ld r0, VCPU_GPR(r0)(r4)
- ld r1, VCPU_GPR(r1)(r4)
- ld r2, VCPU_GPR(r2)(r4)
- ld r3, VCPU_GPR(r3)(r4)
- ld r5, VCPU_GPR(r5)(r4)
- ld r6, VCPU_GPR(r6)(r4)
- ld r7, VCPU_GPR(r7)(r4)
- ld r8, VCPU_GPR(r8)(r4)
- ld r9, VCPU_GPR(r9)(r4)
- ld r10, VCPU_GPR(r10)(r4)
- ld r11, VCPU_GPR(r11)(r4)
- ld r12, VCPU_GPR(r12)(r4)
- ld r13, VCPU_GPR(r13)(r4)
- ld r4, VCPU_GPR(r4)(r4)
+ ld r0, VCPU_GPR(R0)(r4)
+ ld r1, VCPU_GPR(R1)(r4)
+ ld r2, VCPU_GPR(R2)(r4)
+ ld r3, VCPU_GPR(R3)(r4)
+ ld r5, VCPU_GPR(R5)(r4)
+ ld r6, VCPU_GPR(R6)(r4)
+ ld r7, VCPU_GPR(R7)(r4)
+ ld r8, VCPU_GPR(R8)(r4)
+ ld r9, VCPU_GPR(R9)(r4)
+ ld r10, VCPU_GPR(R10)(r4)
+ ld r11, VCPU_GPR(R11)(r4)
+ ld r12, VCPU_GPR(R12)(r4)
+ ld r13, VCPU_GPR(R13)(r4)
+ ld r4, VCPU_GPR(R4)(r4)
  hrfid
  b .
@@ -590,22 +590,22 @@ kvmppc_interrupt:
  /* Save registers */
- std r0, VCPU_GPR(r0)(r9)
- std r1, VCPU_GPR(r1)(r9)
- std r2, VCPU_GPR(r2)(r9)
- std r3, VCPU_GPR(r3)(r9)
- std r4, VCPU_GPR(r4)(r9)
- std r5, VCPU_GPR(r5)(r9)
- std r6, VCPU_GPR(r6)(r9)
- std r7, VCPU_GPR(r7)(r9)
- std r8, VCPU_GPR(r8)(r9)
+ std r0, VCPU_GPR(R0)(r9)
+ std r1, VCPU_GPR(R1)(r9)
+ std r2, VCPU_GPR(R2)(r9)
+ std r3, VCPU_GPR(R3)(r9)
+ std r4, VCPU_GPR(R4)(r9)
+ std r5, VCPU_GPR(R5)(r9)
+ std r6, VCPU_GPR(R6)(r9)
+ std r7, VCPU_GPR(R7)(r9)
+ std r8, VCPU_GPR(R8)(r9)
  ld r0, HSTATE_HOST_R2(r13)
- std r0, VCPU_GPR(r9)(r9)
- std r10, VCPU_GPR(r10)(r9)
- std r11, VCPU_GPR(r11)(r9)
+ std r0, VCPU_GPR(R9)(r9)
+ std r10, VCPU_GPR(R10)(r9)
+ std r11, VCPU_GPR(R11)(r9)
  ld r3, HSTATE_SCRATCH0(r13)
  lwz r4, HSTATE_SCRATCH1(r13)
- std r3, VCPU_GPR(r12)(r9)
+ std r3, VCPU_GPR(R12)(r9)
  stw r4, VCPU_CR(r9)
  /* Restore R1/R2 so we can handle faults */
@@ -626,7 +626,7 @@ kvmppc_interrupt:
  GET_SCRATCH0(r3)
  mflr r4
- std r3, VCPU_GPR(r13)(r9)
+ std r3, VCPU_GPR(R13)(r9)
  std r4, VCPU_LR(r9)
  /* Unset guest mode */
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  /* Save non-volatile GPRs */
- std r14, VCPU_GPR(r14)(r9)
- std r15, VCPU_GPR(r15)(r9)
- std r16, VCPU_GPR(r16)(r9)
- std r17, VCPU_GPR(r17)(r9)
- std r18, VCPU_GPR(r18)(r9)
- std r19, VCPU_GPR(r19)(r9)
- std r20, VCPU_GPR(r20)(r9)
- std r21, VCPU_GPR(r21)(r9)
- std r22, VCPU_GPR(r22)(r9)
- std r23, VCPU_GPR(r23)(r9)
- std r24, VCPU_GPR(r24)(r9)
- std r25, VCPU_GPR(r25)(r9)
- std r26, VCPU_GPR(r26)(r9)
- std r27, VCPU_GPR(r27)(r9)
- std r28, VCPU_GPR(r28)(r9)
- std r29, VCPU_GPR(r29)(r9)
- std r30, VCPU_GPR(r30)(r9)
- std r31, VCPU_GPR(r31)(r9)
+ std r14, VCPU_GPR(R14)(r9)
+ std r15, VCPU_GPR(R15)(r9)
+ std r16, VCPU_GPR(R16)(r9)
+ std r17, VCPU_GPR(R17)(r9)
+ std r18, VCPU_GPR(R18)(r9)
+ std r19, VCPU_GPR(R19)(r9)
+ std r20, VCPU_GPR(R20)(r9)
+ std r21, VCPU_GPR(R21)(r9)
+ std r22, VCPU_GPR(R22)(r9)
+ std r23, VCPU_GPR(R23)(r9)
+ std r24, VCPU_GPR(R24)(r9)
+ std r25, VCPU_GPR(R25)(r9)
+ std r26, VCPU_GPR(R26)(r9)
+ std r27, VCPU_GPR(R27)(r9)
+ std r28, VCPU_GPR(R28)(r9)
+ std r29, VCPU_GPR(R29)(r9)
+ std r30, VCPU_GPR(R30)(r9)
+ std r31, VCPU_GPR(R31)(r9)
  /* Save SPRGs */
  mfspr r3, SPRN_SPRG0
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
  andi. r0, r11, MSR_DR /* data relocation enabled? */
  beq 3f
  clrrdi r0, r4, 28
- PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
  bne 1f /* if no SLB entry found */
  4: std r4, VCPU_FAULT_DAR(r9)
  stw r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
  andi. r0, r11, MSR_IR /* instruction relocation enabled? */
  beq 3f
  clrrdi r0, r10, 28
- PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
  bne 1f /* if no SLB entry found */
  4:
  /* Search the hash table. */
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
  */
  .globl hcall_try_real_mode
  hcall_try_real_mode:
- ld r3,VCPU_GPR(r3)(r9)
+ ld r3,VCPU_GPR(R3)(r9)
  andi. r0,r11,MSR_PR
  bne hcall_real_cont
  clrrdi r3,r3,2
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
  add r3,r3,r4
  mtctr r3
  mr r3,r9 /* get vcpu pointer */
- ld r4,VCPU_GPR(r4)(r9)
+ ld r4,VCPU_GPR(R4)(r9)
  bctrl
  cmpdi r3,H_TOO_HARD
  beq hcall_real_fallback
  ld r4,HSTATE_KVM_VCPU(r13)
- std r3,VCPU_GPR(r3)(r4)
+ std r3,VCPU_GPR(R3)(r4)
  ld r10,VCPU_PC(r4)
  ld r11,VCPU_MSR(r4)
  b fast_guest_return
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
  li r0,0 /* set trap to 0 to say hcall is handled */
  stw r0,VCPU_TRAP(r3)
  li r0,H_SUCCESS
- std r0,VCPU_GPR(r3)(r3)
+ std r0,VCPU_GPR(R3)(r3)
  BEGIN_FTR_SECTION
  b 2f /* just send it up to host on 970 */
  END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
  addi r6,r5,VCORE_NAPPING_THREADS
  31: lwarx r4,0,r6
  or r4,r4,r0
- PPC_POPCNTW(r7,r4)
+ PPC_POPCNTW(R7,R4)
  cmpw r7,r8
  bge 2f
  stwcx. r4,0,r6
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
  * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
  */
  /* Save non-volatile GPRs */
- std r14, VCPU_GPR(r14)(r3)
- std r15, VCPU_GPR(r15)(r3)
- std r16, VCPU_GPR(r16)(r3)
- std r17, VCPU_GPR(r17)(r3)
- std r18, VCPU_GPR(r18)(r3)
- std r19, VCPU_GPR(r19)(r3)
- std r20, VCPU_GPR(r20)(r3)
- std r21, VCPU_GPR(r21)(r3)
- std r22, VCPU_GPR(r22)(r3)
- std r23, VCPU_GPR(r23)(r3)
- std r24, VCPU_GPR(r24)(r3)
- std r25, VCPU_GPR(r25)(r3)
- std r26, VCPU_GPR(r26)(r3)
- std r27, VCPU_GPR(r27)(r3)
- std r28, VCPU_GPR(r28)(r3)
- std r29, VCPU_GPR(r29)(r3)
- std r30, VCPU_GPR(r30)(r3)
- std r31, VCPU_GPR(r31)(r3)
+ std r14, VCPU_GPR(R14)(r3)
+ std r15, VCPU_GPR(R15)(r3)
+ std r16, VCPU_GPR(R16)(r3)
+ std r17, VCPU_GPR(R17)(r3)
+ std r18, VCPU_GPR(R18)(r3)
+ std r19, VCPU_GPR(R19)(r3)
+ std r20, VCPU_GPR(R20)(r3)
+ std r21, VCPU_GPR(R21)(r3)
+ std r22, VCPU_GPR(R22)(r3)
+ std r23, VCPU_GPR(R23)(r3)
+ std r24, VCPU_GPR(R24)(r3)
+ std r25, VCPU_GPR(R25)(r3)
+ std r26, VCPU_GPR(R26)(r3)
+ std r27, VCPU_GPR(R27)(r3)
+ std r28, VCPU_GPR(R28)(r3)
+ std r29, VCPU_GPR(R29)(r3)
+ std r30, VCPU_GPR(R30)(r3)
+ std r31, VCPU_GPR(R31)(r3)
  /* save FP state */
  bl .kvmppc_save_fp
@@ -1513,24 +1513,24 @@ kvm_end_cede:
  bl kvmppc_load_fp
  /* Load NV GPRS */
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
  /* clear our bit in vcore->napping_threads */
  33: ld r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
  reg = 0
  .rept 32
  li r6,reg*16+VCPU_VSRS
- STXVD2X(reg,r6,r3)
+ STXVD2X(reg,R6,R3)
  reg = reg + 1
  .endr
  FTR_SECTION_ELSE
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
  reg = 0
  .rept 32
  li r7,reg*16+VCPU_VSRS
- LXVD2X(reg,r7,r4)
+ LXVD2X(reg,R7,R4)
  reg = reg + 1
  .endr
  FTR_SECTION_ELSE
...
@@ -39,24 +39,24 @@
  #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
  #define VCPU_LOAD_NVGPRS(vcpu) \
- PPC_LL r14, VCPU_GPR(r14)(vcpu); \
- PPC_LL r15, VCPU_GPR(r15)(vcpu); \
- PPC_LL r16, VCPU_GPR(r16)(vcpu); \
- PPC_LL r17, VCPU_GPR(r17)(vcpu); \
- PPC_LL r18, VCPU_GPR(r18)(vcpu); \
- PPC_LL r19, VCPU_GPR(r19)(vcpu); \
- PPC_LL r20, VCPU_GPR(r20)(vcpu); \
- PPC_LL r21, VCPU_GPR(r21)(vcpu); \
- PPC_LL r22, VCPU_GPR(r22)(vcpu); \
- PPC_LL r23, VCPU_GPR(r23)(vcpu); \
- PPC_LL r24, VCPU_GPR(r24)(vcpu); \
- PPC_LL r25, VCPU_GPR(r25)(vcpu); \
- PPC_LL r26, VCPU_GPR(r26)(vcpu); \
- PPC_LL r27, VCPU_GPR(r27)(vcpu); \
- PPC_LL r28, VCPU_GPR(r28)(vcpu); \
- PPC_LL r29, VCPU_GPR(r29)(vcpu); \
- PPC_LL r30, VCPU_GPR(r30)(vcpu); \
- PPC_LL r31, VCPU_GPR(r31)(vcpu); \
+ PPC_LL r14, VCPU_GPR(R14)(vcpu); \
+ PPC_LL r15, VCPU_GPR(R15)(vcpu); \
+ PPC_LL r16, VCPU_GPR(R16)(vcpu); \
+ PPC_LL r17, VCPU_GPR(R17)(vcpu); \
+ PPC_LL r18, VCPU_GPR(R18)(vcpu); \
+ PPC_LL r19, VCPU_GPR(R19)(vcpu); \
+ PPC_LL r20, VCPU_GPR(R20)(vcpu); \
+ PPC_LL r21, VCPU_GPR(R21)(vcpu); \
+ PPC_LL r22, VCPU_GPR(R22)(vcpu); \
+ PPC_LL r23, VCPU_GPR(R23)(vcpu); \
+ PPC_LL r24, VCPU_GPR(R24)(vcpu); \
+ PPC_LL r25, VCPU_GPR(R25)(vcpu); \
+ PPC_LL r26, VCPU_GPR(R26)(vcpu); \
+ PPC_LL r27, VCPU_GPR(R27)(vcpu); \
+ PPC_LL r28, VCPU_GPR(R28)(vcpu); \
+ PPC_LL r29, VCPU_GPR(R29)(vcpu); \
+ PPC_LL r30, VCPU_GPR(R30)(vcpu); \
+ PPC_LL r31, VCPU_GPR(R31)(vcpu); \
  /*****************************************************************************
  * *
@@ -131,24 +131,24 @@ kvmppc_handler_highmem:
  /* R7 = vcpu */
  PPC_LL r7, GPR4(r1)
- PPC_STL r14, VCPU_GPR(r14)(r7)
- PPC_STL r15, VCPU_GPR(r15)(r7)
- PPC_STL r16, VCPU_GPR(r16)(r7)
- PPC_STL r17, VCPU_GPR(r17)(r7)
- PPC_STL r18, VCPU_GPR(r18)(r7)
- PPC_STL r19, VCPU_GPR(r19)(r7)
- PPC_STL r20, VCPU_GPR(r20)(r7)
- PPC_STL r21, VCPU_GPR(r21)(r7)
- PPC_STL r22, VCPU_GPR(r22)(r7)
- PPC_STL r23, VCPU_GPR(r23)(r7)
- PPC_STL r24, VCPU_GPR(r24)(r7)
- PPC_STL r25, VCPU_GPR(r25)(r7)
- PPC_STL r26, VCPU_GPR(r26)(r7)
- PPC_STL r27, VCPU_GPR(r27)(r7)
- PPC_STL r28, VCPU_GPR(r28)(r7)
- PPC_STL r29, VCPU_GPR(r29)(r7)
- PPC_STL r30, VCPU_GPR(r30)(r7)
- PPC_STL r31, VCPU_GPR(r31)(r7)
+ PPC_STL r14, VCPU_GPR(R14)(r7)
+ PPC_STL r15, VCPU_GPR(R15)(r7)
+ PPC_STL r16, VCPU_GPR(R16)(r7)
+ PPC_STL r17, VCPU_GPR(R17)(r7)
+ PPC_STL r18, VCPU_GPR(R18)(r7)
+ PPC_STL r19, VCPU_GPR(R19)(r7)
+ PPC_STL r20, VCPU_GPR(R20)(r7)
+ PPC_STL r21, VCPU_GPR(R21)(r7)
+ PPC_STL r22, VCPU_GPR(R22)(r7)
+ PPC_STL r23, VCPU_GPR(R23)(r7)
+ PPC_STL r24, VCPU_GPR(R24)(r7)
+ PPC_STL r25, VCPU_GPR(R25)(r7)
+ PPC_STL r26, VCPU_GPR(R26)(r7)
+ PPC_STL r27, VCPU_GPR(R27)(r7)
+ PPC_STL r28, VCPU_GPR(R28)(r7)
+ PPC_STL r29, VCPU_GPR(R29)(r7)
+ PPC_STL r30, VCPU_GPR(R30)(r7)
+ PPC_STL r31, VCPU_GPR(R31)(r7)
  /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
  mr r5, r12
...
This diff is collapsed.
@@ -67,15 +67,15 @@
  */
  .macro kvm_handler_common intno, srr0, flags
  /* Restore host stack pointer */
- PPC_STL r1, VCPU_GPR(r1)(r4)
- PPC_STL r2, VCPU_GPR(r2)(r4)
+ PPC_STL r1, VCPU_GPR(R1)(r4)
+ PPC_STL r2, VCPU_GPR(R2)(r4)
  PPC_LL r1, VCPU_HOST_STACK(r4)
  PPC_LL r2, HOST_R2(r1)
  mfspr r10, SPRN_PID
  lwz r8, VCPU_HOST_PID(r4)
  PPC_LL r11, VCPU_SHARED(r4)
- PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+ PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
  li r14, \intno
  stw r10, VCPU_GUEST_PID(r4)
@@ -137,27 +137,27 @@
  */
  mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
- PPC_STL r15, VCPU_GPR(r15)(r4)
- PPC_STL r16, VCPU_GPR(r16)(r4)
- PPC_STL r17, VCPU_GPR(r17)(r4)
- PPC_STL r18, VCPU_GPR(r18)(r4)
- PPC_STL r19, VCPU_GPR(r19)(r4)
+ PPC_STL r15, VCPU_GPR(R15)(r4)
+ PPC_STL r16, VCPU_GPR(R16)(r4)
+ PPC_STL r17, VCPU_GPR(R17)(r4)
+ PPC_STL r18, VCPU_GPR(R18)(r4)
+ PPC_STL r19, VCPU_GPR(R19)(r4)
  mr r8, r3
- PPC_STL r20, VCPU_GPR(r20)(r4)
+ PPC_STL r20, VCPU_GPR(R20)(r4)
  rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
- PPC_STL r21, VCPU_GPR(r21)(r4)
+ PPC_STL r21, VCPU_GPR(R21)(r4)
  rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
- PPC_STL r22, VCPU_GPR(r22)(r4)
+ PPC_STL r22, VCPU_GPR(R22)(r4)
  rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
- PPC_STL r23, VCPU_GPR(r23)(r4)
- PPC_STL r24, VCPU_GPR(r24)(r4)
- PPC_STL r25, VCPU_GPR(r25)(r4)
- PPC_STL r26, VCPU_GPR(r26)(r4)
- PPC_STL r27, VCPU_GPR(r27)(r4)
- PPC_STL r28, VCPU_GPR(r28)(r4)
- PPC_STL r29, VCPU_GPR(r29)(r4)
- PPC_STL r30, VCPU_GPR(r30)(r4)
- PPC_STL r31, VCPU_GPR(r31)(r4)
+ PPC_STL r23, VCPU_GPR(R23)(r4)
+ PPC_STL r24, VCPU_GPR(R24)(r4)
+ PPC_STL r25, VCPU_GPR(R25)(r4)
+ PPC_STL r26, VCPU_GPR(R26)(r4)
+ PPC_STL r27, VCPU_GPR(R27)(r4)
+ PPC_STL r28, VCPU_GPR(R28)(r4)
+ PPC_STL r29, VCPU_GPR(R29)(r4)
+ PPC_STL r30, VCPU_GPR(R30)(r4)
+ PPC_STL r31, VCPU_GPR(R31)(r4)
  mtspr SPRN_EPLC, r8
  /* disable preemption, so we are sure we hit the fixup handler */
@@ -211,24 +211,24 @@
  .macro kvm_handler intno srr0, srr1, flags
  _GLOBAL(kvmppc_handler_\intno\()_\srr1)
  GET_VCPU(r11, r10)
- PPC_STL r3, VCPU_GPR(r3)(r11)
+ PPC_STL r3, VCPU_GPR(R3)(r11)
  mfspr r3, SPRN_SPRG_RSCRATCH0
- PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_STL r4, VCPU_GPR(R4)(r11)
  PPC_LL r4, THREAD_NORMSAVE(0)(r10)
- PPC_STL r5, VCPU_GPR(r5)(r11)
+ PPC_STL r5, VCPU_GPR(R5)(r11)
  stw r13, VCPU_CR(r11)
  mfspr r5, \srr0
- PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_STL r3, VCPU_GPR(R10)(r11)
  PPC_LL r3, THREAD_NORMSAVE(2)(r10)
- PPC_STL r6, VCPU_GPR(r6)(r11)
- PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r6, VCPU_GPR(R6)(r11)
+ PPC_STL r4, VCPU_GPR(R11)(r11)
  mfspr r6, \srr1
- PPC_STL r7, VCPU_GPR(r7)(r11)
- PPC_STL r8, VCPU_GPR(r8)(r11)
- PPC_STL r9, VCPU_GPR(r9)(r11)
- PPC_STL r3, VCPU_GPR(r13)(r11)
+ PPC_STL r7, VCPU_GPR(R7)(r11)
+ PPC_STL r8, VCPU_GPR(R8)(r11)
+ PPC_STL r9, VCPU_GPR(R9)(r11)
+ PPC_STL r3, VCPU_GPR(R13)(r11)
  mfctr r7
- PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r12, VCPU_GPR(R12)(r11)
  PPC_STL r7, VCPU_CTR(r11)
  mr r4, r11
  kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +238,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
  _GLOBAL(kvmppc_handler_\intno\()_\srr1)
  mfspr r10, SPRN_SPRG_THREAD
  GET_VCPU(r11, r10)
- PPC_STL r3, VCPU_GPR(r3)(r11)
+ PPC_STL r3, VCPU_GPR(R3)(r11)
  mfspr r3, \scratch
- PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_STL r4, VCPU_GPR(R4)(r11)
  PPC_LL r4, GPR9(r8)
- PPC_STL r5, VCPU_GPR(r5)(r11)
+ PPC_STL r5, VCPU_GPR(R5)(r11)
  stw r9, VCPU_CR(r11)
  mfspr r5, \srr0
- PPC_STL r3, VCPU_GPR(r8)(r11)
+ PPC_STL r3, VCPU_GPR(R8)(r11)
  PPC_LL r3, GPR10(r8)
- PPC_STL r6, VCPU_GPR(r6)(r11)
- PPC_STL r4, VCPU_GPR(r9)(r11)
+ PPC_STL r6, VCPU_GPR(R6)(r11)
+ PPC_STL r4, VCPU_GPR(R9)(r11)
  mfspr r6, \srr1
  PPC_LL r4, GPR11(r8)
- PPC_STL r7, VCPU_GPR(r7)(r11)
- PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_STL r7, VCPU_GPR(R7)(r11)
+ PPC_STL r3, VCPU_GPR(R10)(r11)
  mfctr r7
- PPC_STL r12, VCPU_GPR(r12)(r11)
- PPC_STL r13, VCPU_GPR(r13)(r11)
- PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r12, VCPU_GPR(R12)(r11)
+ PPC_STL r13, VCPU_GPR(R13)(r11)
+ PPC_STL r4, VCPU_GPR(R11)(r11)
  PPC_STL r7, VCPU_CTR(r11)
  mr r4, r11
  kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +310,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
  _GLOBAL(kvmppc_resume_host)
  /* Save remaining volatile guest register state to vcpu. */
  mfspr r3, SPRN_VRSAVE
- PPC_STL r0, VCPU_GPR(r0)(r4)
+ PPC_STL r0, VCPU_GPR(R0)(r4)
  mflr r5
  mfspr r6, SPRN_SPRG4
  PPC_STL r5, VCPU_LR(r4)
@@ -358,27 +358,27 @@ _GLOBAL(kvmppc_resume_host)
  /* Restore vcpu pointer and the nonvolatiles we used. */
  mr r4, r14
- PPC_LL r14, VCPU_GPR(r14)(r4)
+ PPC_LL r14, VCPU_GPR(R14)(r4)
  andi. r5, r3, RESUME_FLAG_NV
  beq skip_nv_load
- PPC_LL r15, VCPU_GPR(r15)(r4)
- PPC_LL r16, VCPU_GPR(r16)(r4)
- PPC_LL r17, VCPU_GPR(r17)(r4)
- PPC_LL r18, VCPU_GPR(r18)(r4)
- PPC_LL r19, VCPU_GPR(r19)(r4)
- PPC_LL r20, VCPU_GPR(r20)(r4)
- PPC_LL r21, VCPU_GPR(r21)(r4)
- PPC_LL r22, VCPU_GPR(r22)(r4)
- PPC_LL r23, VCPU_GPR(r23)(r4)
- PPC_LL r24, VCPU_GPR(r24)(r4)
- PPC_LL r25, VCPU_GPR(r25)(r4)
- PPC_LL r26, VCPU_GPR(r26)(r4)
- PPC_LL r27, VCPU_GPR(r27)(r4)
- PPC_LL r28, VCPU_GPR(r28)(r4)
- PPC_LL r29, VCPU_GPR(r29)(r4)
- PPC_LL r30, VCPU_GPR(r30)(r4)
- PPC_LL r31, VCPU_GPR(r31)(r4)
+ PPC_LL r15, VCPU_GPR(R15)(r4)
+ PPC_LL r16, VCPU_GPR(R16)(r4)
+ PPC_LL r17, VCPU_GPR(R17)(r4)
+ PPC_LL r18, VCPU_GPR(R18)(r4)
+ PPC_LL r19, VCPU_GPR(R19)(r4)
+ PPC_LL r20, VCPU_GPR(R20)(r4)
+ PPC_LL r21, VCPU_GPR(R21)(r4)
+ PPC_LL r22, VCPU_GPR(R22)(r4)
+ PPC_LL r23, VCPU_GPR(R23)(r4)
+ PPC_LL r24, VCPU_GPR(R24)(r4)
+ PPC_LL r25, VCPU_GPR(R25)(r4)
+ PPC_LL r26, VCPU_GPR(R26)(r4)
+ PPC_LL r27, VCPU_GPR(R27)(r4)
+ PPC_LL r28, VCPU_GPR(R28)(r4)
+ PPC_LL r29, VCPU_GPR(R29)(r4)
+ PPC_LL r30, VCPU_GPR(R30)(r4)
+ PPC_LL r31, VCPU_GPR(R31)(r4)
  skip_nv_load:
  /* Should we return to the guest? */
  andi. r5, r3, RESUME_FLAG_HOST
@@ -396,23 +396,23 @@ heavyweight_exit:
  * non-volatiles.
  */
- PPC_STL r15, VCPU_GPR(r15)(r4)
- PPC_STL r16, VCPU_GPR(r16)(r4)
- PPC_STL r17, VCPU_GPR(r17)(r4)
- PPC_STL r18, VCPU_GPR(r18)(r4)
- PPC_STL r19, VCPU_GPR(r19)(r4)
- PPC_STL r20, VCPU_GPR(r20)(r4)
- PPC_STL r21, VCPU_GPR(r21)(r4)
- PPC_STL r22, VCPU_GPR(r22)(r4)
- PPC_STL r23, VCPU_GPR(r23)(r4)
- PPC_STL r24, VCPU_GPR(r24)(r4)
- PPC_STL r25, VCPU_GPR(r25)(r4)
- PPC_STL r26, VCPU_GPR(r26)(r4)
- PPC_STL r27, VCPU_GPR(r27)(r4)
- PPC_STL r28, VCPU_GPR(r28)(r4)
- PPC_STL r29, VCPU_GPR(r29)(r4)
- PPC_STL r30, VCPU_GPR(r30)(r4)
- PPC_STL r31, VCPU_GPR(r31)(r4)
+ PPC_STL r15, VCPU_GPR(R15)(r4)
+ PPC_STL r16, VCPU_GPR(R16)(r4)
+ PPC_STL r17, VCPU_GPR(R17)(r4)
+ PPC_STL r18, VCPU_GPR(R18)(r4)
+ PPC_STL r19, VCPU_GPR(R19)(r4)
+ PPC_STL r20, VCPU_GPR(R20)(r4)
+ PPC_STL r21, VCPU_GPR(R21)(r4)
+ PPC_STL r22, VCPU_GPR(R22)(r4)
+ PPC_STL r23, VCPU_GPR(R23)(r4)
+ PPC_STL r24, VCPU_GPR(R24)(r4)
+ PPC_STL r25, VCPU_GPR(R25)(r4)
+ PPC_STL r26, VCPU_GPR(R26)(r4)
+ PPC_STL r27, VCPU_GPR(R27)(r4)
+ PPC_STL r28, VCPU_GPR(R28)(r4)
+ PPC_STL r29, VCPU_GPR(R29)(r4)
+ PPC_STL r30, VCPU_GPR(R30)(r4)
+ PPC_STL r31, VCPU_GPR(R31)(r4)
  /* Load host non-volatile register state from host stack. */
  PPC_LL r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +478,24 @@ _GLOBAL(__kvmppc_vcpu_run)
  PPC_STL r31, HOST_NV_GPR(r31)(r1)
  /* Load guest non-volatiles. */
- PPC_LL r14, VCPU_GPR(r14)(r4)
- PPC_LL r15, VCPU_GPR(r15)(r4)
- PPC_LL r16, VCPU_GPR(r16)(r4)
- PPC_LL r17, VCPU_GPR(r17)(r4)
- PPC_LL r18, VCPU_GPR(r18)(r4)
- PPC_LL r19, VCPU_GPR(r19)(r4)
- PPC_LL r20, VCPU_GPR(r20)(r4)
- PPC_LL r21, VCPU_GPR(r21)(r4)
- PPC_LL r22, VCPU_GPR(r22)(r4)
- PPC_LL r23, VCPU_GPR(r23)(r4)
- PPC_LL r24, VCPU_GPR(r24)(r4)
- PPC_LL r25, VCPU_GPR(r25)(r4)
- PPC_LL r26, VCPU_GPR(r26)(r4)
- PPC_LL r27, VCPU_GPR(r27)(r4)
- PPC_LL r28, VCPU_GPR(r28)(r4)
- PPC_LL r29, VCPU_GPR(r29)(r4)
- PPC_LL r30, VCPU_GPR(r30)(r4)
- PPC_LL r31, VCPU_GPR(r31)(r4)
+ PPC_LL r14, VCPU_GPR(R14)(r4)
+ PPC_LL r15, VCPU_GPR(R15)(r4)
+ PPC_LL r16, VCPU_GPR(R16)(r4)
+ PPC_LL r17, VCPU_GPR(R17)(r4)
+ PPC_LL r18, VCPU_GPR(R18)(r4)
+ PPC_LL r19, VCPU_GPR(R19)(r4)
+ PPC_LL r20, VCPU_GPR(R20)(r4)
+ PPC_LL r21, VCPU_GPR(R21)(r4)
+ PPC_LL r22, VCPU_GPR(R22)(r4)
+ PPC_LL r23, VCPU_GPR(R23)(r4)
+ PPC_LL r24, VCPU_GPR(R24)(r4)
+ PPC_LL r25, VCPU_GPR(R25)(r4)
+ PPC_LL r26, VCPU_GPR(R26)(r4)
+ PPC_LL r27, VCPU_GPR(R27)(r4)
+ PPC_LL r28, VCPU_GPR(R28)(r4)
+ PPC_LL r29, VCPU_GPR(R29)(r4)
+ PPC_LL r30, VCPU_GPR(R30)(r4)
+ PPC_LL r31, VCPU_GPR(R31)(r4)
  lightweight_exit:
@@ -554,13 +554,13 @@ lightweight_exit:
  lwz r7, VCPU_CR(r4)
  PPC_LL r8, VCPU_PC(r4)
  PPC_LD(r9, VCPU_SHARED_MSR, r11)
- PPC_LL r0, VCPU_GPR(r0)(r4)
- PPC_LL r1, VCPU_GPR(r1)(r4)
- PPC_LL r2, VCPU_GPR(r2)(r4)
- PPC_LL r10, VCPU_GPR(r10)(r4)
- PPC_LL r11, VCPU_GPR(r11)(r4)
- PPC_LL r12, VCPU_GPR(r12)(r4)
- PPC_LL r13, VCPU_GPR(r13)(r4)
+ PPC_LL r0, VCPU_GPR(R0)(r4)
+ PPC_LL r1, VCPU_GPR(R1)(r4)
+ PPC_LL r2, VCPU_GPR(R2)(r4)
+ PPC_LL r10, VCPU_GPR(R10)(r4)
+ PPC_LL r11, VCPU_GPR(R11)(r4)
+ PPC_LL r12, VCPU_GPR(R12)(r4)
+ PPC_LL r13, VCPU_GPR(R13)(r4)
  mtlr r3
  mtxer r5
  mtctr r6
@@ -586,12 +586,12 @@ lightweight_exit:
  mtcr r7
  /* Finish loading guest volatiles and jump to guest. */
- PPC_LL r5, VCPU_GPR(r5)(r4)
- PPC_LL r6, VCPU_GPR(r6)(r4)
- PPC_LL r7, VCPU_GPR(r7)(r4)
- PPC_LL r8, VCPU_GPR(r8)(r4)
- PPC_LL r9, VCPU_GPR(r9)(r4)
- PPC_LL r3, VCPU_GPR(r3)(r4)
- PPC_LL r4, VCPU_GPR(r4)(r4)
+ PPC_LL r5, VCPU_GPR(R5)(r4)
+ PPC_LL r6, VCPU_GPR(R6)(r4)
+ PPC_LL r7, VCPU_GPR(R7)(r4)
+ PPC_LL r8, VCPU_GPR(R8)(r4)
+ PPC_LL r9, VCPU_GPR(R9)(r4)
+ PPC_LL r3, VCPU_GPR(R3)(r4)
+ PPC_LL r4, VCPU_GPR(R4)(r4)
  rfi
@@ -114,9 +114,9 @@ _GLOBAL(csum_partial)
  mtctr r6
  stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
  ld r6,0(r3)
  ld r9,8(r3)
@@ -175,9 +175,9 @@ _GLOBAL(csum_partial)
  adde r0,r0,r15
  adde r0,r0,r16
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
  addi r1,r1,STACKFRAMESIZE
  andi. r4,r4,63
@@ -299,9 +299,9 @@ dest; sth r6,0(r4)
  mtctr r6
  stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
  source; ld r6,0(r3)
  source; ld r9,8(r3)
@@ -382,9 +382,9 @@ dest; std r16,56(r4)
  adde r0,r0,r15
  adde r0,r0,r16
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
  addi r1,r1,STACKFRAMESIZE
  andi. r5,r5,63
...
@@ -113,13 +113,13 @@ _GLOBAL(copypage_power7)
  #endif
  .Lnonvmx_copy:
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
- std r17,STK_REG(r17)(r1)
- std r18,STK_REG(r18)(r1)
- std r19,STK_REG(r19)(r1)
- std r20,STK_REG(r20)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+ std r17,STK_REG(R17)(r1)
+ std r18,STK_REG(R18)(r1)
+ std r19,STK_REG(R19)(r1)
+ std r20,STK_REG(R20)(r1)
  1: ld r0,0(r4)
  ld r5,8(r4)
@@ -157,12 +157,12 @@ _GLOBAL(copypage_power7)
  addi r3,r3,128
  bdnz 1b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
- ld r17,STK_REG(r17)(r1)
- ld r18,STK_REG(r18)(r1)
- ld r19,STK_REG(r19)(r1)
- ld r20,STK_REG(r20)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r20,STK_REG(R20)(r1)
  addi r1,r1,STACKFRAMESIZE
  blr
@@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base)
  dcbt 0,r4
  beq .Lcopy_page_4K
  andi. r6,r6,7
- PPC_MTOCRF(0x01,r5)
+ PPC_MTOCRF(0x01,R5)
  blt cr1,.Lshort_copy
  /* Below we want to nop out the bne if we're on a CPU that has the
  * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
@@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
  blr
  .Ldst_unaligned:
- PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
+ PPC_MTOCRF(0x01,R6) /* put #bytes to 8B bdry into cr7 */
  subf r5,r6,r5
  li r7,0
  cmpldi cr1,r5,16
@@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
  2: bf cr7*4+1,3f
  37: lwzx r0,r7,r4
  83: stwx r0,r7,r3
- 3: PPC_MTOCRF(0x01,r5)
+ 3: PPC_MTOCRF(0x01,R5)
  add r4,r6,r4
  add r3,r6,r3
  b .Ldst_aligned
...
@@ -57,9 +57,9 @@
  .Ldo_err4:
- ld r16,STK_REG(r16)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r14,STK_REG(r14)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r14,STK_REG(R14)(r1)
  .Ldo_err3:
  bl .exit_vmx_usercopy
  ld r0,STACKFRAMESIZE+16(r1)
@@ -68,15 +68,15 @@
  #endif /* CONFIG_ALTIVEC */
  .Ldo_err2:
- ld r22,STK_REG(r22)(r1)
- ld r21,STK_REG(r21)(r1)
- ld r20,STK_REG(r20)(r1)
- ld r19,STK_REG(r19)(r1)
- ld r18,STK_REG(r18)(r1)
- ld r17,STK_REG(r17)(r1)
- ld r16,STK_REG(r16)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r14,STK_REG(r14)(r1)
+ ld r22,STK_REG(R22)(r1)
+ ld r21,STK_REG(R21)(r1)
+ ld r20,STK_REG(R20)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r14,STK_REG(R14)(r1)
  .Lexit:
  addi r1,r1,STACKFRAMESIZE
  .Ldo_err1:
@@ -137,15 +137,15 @@ err1; stw r0,0(r3)
  mflr r0
  stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
- std r17,STK_REG(r17)(r1)
- std r18,STK_REG(r18)(r1)
- std r19,STK_REG(r19)(r1)
- std r20,STK_REG(r20)(r1)
- std r21,STK_REG(r21)(r1)
- std r22,STK_REG(r22)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+ std r17,STK_REG(R17)(r1)
+ std r18,STK_REG(R18)(r1)
+ std r19,STK_REG(R19)(r1)
+ std r20,STK_REG(R20)(r1)
+ std r21,STK_REG(R21)(r1)
+ std r22,STK_REG(R22)(r1)
  std r0,STACKFRAMESIZE+16(r1)
  srdi r6,r5,7
@@ -192,15 +192,15 @@ err2; std r21,120(r3)
  clrldi r5,r5,(64-7)
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
- ld r17,STK_REG(r17)(r1)
- ld r18,STK_REG(r18)(r1)
- ld r19,STK_REG(r19)(r1)
- ld r20,STK_REG(r20)(r1)
- ld r21,STK_REG(r21)(r1)
- ld r22,STK_REG(r22)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r20,STK_REG(R20)(r1)
+ ld r21,STK_REG(R21)(r1)
+ ld r22,STK_REG(R22)(r1)
  addi r1,r1,STACKFRAMESIZE
  /* Up to 127B to go */
@@ -440,9 +440,9 @@ err3; stvx vr0,r3,r11
  7: sub r5,r5,r6
  srdi r6,r5,7
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
  li r12,64
  li r14,80
@@ -477,9 +477,9 @@ err4; stvx vr0,r3,r16
  addi r3,r3,128
  bdnz 8b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
  /* Up to 127B to go */
  clrldi r5,r5,(64-7)
@@ -625,9 +625,9 @@ err3; stvx vr11,r3,r11
  7: sub r5,r5,r6
  srdi r6,r5,7
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
  li r12,64
  li r14,80
@@ -670,9 +670,9 @@ err4; stvx vr15,r3,r16
  addi r3,r3,128
  bdnz 8b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
  /* Up to 127B to go */
  clrldi r5,r5,(64-7)
...
@@ -28,7 +28,7 @@ BEGIN_FTR_SECTION
  nop
  nop
  FTR_SECTION_ELSE
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
  clrldi r3,r3,64-8
  blr
  ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
@@ -42,14 +42,14 @@ BEGIN_FTR_SECTION
  nop
  FTR_SECTION_ELSE
  BEGIN_FTR_SECTION_NESTED(50)
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
  srdi r4,r3,8
  add r3,r4,r3
  clrldi r3,r3,64-8
  blr
  FTR_SECTION_ELSE_NESTED(50)
  clrlwi r3,r3,16
- PPC_POPCNTW(r3,r3)
+ PPC_POPCNTW(R3,R3)
  clrldi r3,r3,64-8
  blr
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
@@ -66,7 +66,7 @@ BEGIN_FTR_SECTION
  nop
  FTR_SECTION_ELSE
  BEGIN_FTR_SECTION_NESTED(51)
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
  srdi r4,r3,16
  add r3,r4,r3
  srdi r4,r3,8
@@ -74,7 +74,7 @@ FTR_SECTION_ELSE
  clrldi r3,r3,64-8
  blr
  FTR_SECTION_ELSE_NESTED(51)
- PPC_POPCNTW(r3,r3)
+ PPC_POPCNTW(R3,R3)
  clrldi r3,r3,64-8
  blr
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
@@ -93,7 +93,7 @@ BEGIN_FTR_SECTION
  nop
  FTR_SECTION_ELSE
  BEGIN_FTR_SECTION_NESTED(52)
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
  srdi r4,r3,32
  add r3,r4,r3
  srdi r4,r3,16
@@ -103,7 +103,7 @@ FTR_SECTION_ELSE
  clrldi r3,r3,64-8
  blr
  FTR_SECTION_ELSE_NESTED(52)
- PPC_POPCNTD(r3,r3)
+ PPC_POPCNTD(R3,R3)
  clrldi r3,r3,64-8
  blr
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
...
@@ -330,13 +330,13 @@ _GLOBAL(do_lxvd2x)
  MTMSRD(r7)
  isync
  beq cr7,1f
- STXVD2X(0,r1,r8)
+ STXVD2X(0,R1,R8)
  1: li r9,-EFAULT
- 2: LXVD2X(0,0,r4)
+ 2: LXVD2X(0,0,R4)
  li r9,0
  3: beq cr7,4f
  bl put_vsr
- LXVD2X(0,r1,r8)
+ LXVD2X(0,R1,R8)
  4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
  mtlr r0
  MTMSRD(r6)
@@ -358,13 +358,13 @@ _GLOBAL(do_stxvd2x)
  MTMSRD(r7)
  isync
  beq cr7,1f
- STXVD2X(0,r1,r8)
+ STXVD2X(0,R1,R8)
  bl get_vsr
  1: li r9,-EFAULT
- 2: STXVD2X(0,0,r4)
+ 2: STXVD2X(0,0,R4)
  li r9,0
  3: beq cr7,4f
- LXVD2X(0,r1,r8)
+ LXVD2X(0,R1,R8)
  4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
  mtlr r0
  MTMSRD(r6)
...
@@ -19,7 +19,7 @@ _GLOBAL(memset)
  rlwimi r4,r4,16,0,15
  cmplw cr1,r5,r0 /* do we get that far? */
  rldimi r4,r4,32,0
- PPC_MTOCRF(1,r0)
+ PPC_MTOCRF(1,R0)
  mr r6,r3
  blt cr1,8f
  beq+ 3f /* if already 8-byte aligned */
@@ -49,7 +49,7 @@ _GLOBAL(memset)
  bdnz 4b
  5: srwi. r0,r5,3
  clrlwi r5,r5,29
- PPC_MTOCRF(1,r0)
+ PPC_MTOCRF(1,R0)
  beq 8f
  bf 29,6f
  std r4,0(r6)
@@ -65,7 +65,7 @@ _GLOBAL(memset)
  std r4,0(r6)
  addi r6,r6,8
  8: cmpwi r5,0
- PPC_MTOCRF(1,r5)
+ PPC_MTOCRF(1,R5)
  beqlr+
  bf 29,9f
  stw r4,0(r6)
...
@@ -16,7 +16,7 @@ BEGIN_FTR_SECTION
  FTR_SECTION_ELSE
  b memcpy_power7
  ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
- PPC_MTOCRF(0x01,r5)
+ PPC_MTOCRF(0x01,R5)
  cmpldi cr1,r5,16
  neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
  andi. r6,r6,7
@@ -158,7 +158,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
  blr
  .Ldst_unaligned:
- PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7
+ PPC_MTOCRF(0x01,R6) # put #bytes to 8B bdry into cr7
  subf r5,r6,r5
  li r7,0
  cmpldi cr1,r5,16
@@ -173,7 +173,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
  2: bf cr7*4+1,3f
  lwzx r0,r7,r4
  stwx r0,r7,r3
- 3: PPC_MTOCRF(0x01,r5)
+ 3: PPC_MTOCRF(0x01,R5)
  add r4,r6,r4
  add r3,r6,r3
  b .Ldst_aligned
...
@@ -69,15 +69,15 @@ _GLOBAL(memcpy_power7)
  mflr r0
  stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
- std r17,STK_REG(r17)(r1)
- std r18,STK_REG(r18)(r1)
- std r19,STK_REG(r19)(r1)
- std r20,STK_REG(r20)(r1)
- std r21,STK_REG(r21)(r1)
- std r22,STK_REG(r22)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+ std r17,STK_REG(R17)(r1)
+ std r18,STK_REG(R18)(r1)
+ std r19,STK_REG(R19)(r1)
+ std r20,STK_REG(R20)(r1)
+ std r21,STK_REG(R21)(r1)
+ std r22,STK_REG(R22)(r1)
  std r0,STACKFRAMESIZE+16(r1)
  srdi r6,r5,7
@@ -124,15 +124,15 @@ _GLOBAL(memcpy_power7)
  clrldi r5,r5,(64-7)
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
- ld r17,STK_REG(r17)(r1)
- ld r18,STK_REG(r18)(r1)
- ld r19,STK_REG(r19)(r1)
- ld r20,STK_REG(r20)(r1)
- ld r21,STK_REG(r21)(r1)
- ld r22,STK_REG(r22)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r20,STK_REG(R20)(r1)
+ ld r21,STK_REG(R21)(r1)
+ ld r22,STK_REG(R22)(r1)
  addi r1,r1,STACKFRAMESIZE
  /* Up to 127B to go */
@@ -343,9 +343,9 @@ _GLOBAL(memcpy_power7)
  7: sub r5,r5,r6
  srdi r6,r5,7
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
  li r12,64
  li r14,80
@@ -380,9 +380,9 @@ _GLOBAL(memcpy_power7)
  addi r3,r3,128
  bdnz 8b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
  /* Up to 127B to go */
  clrldi r5,r5,(64-7)
@@ -529,9 +529,9 @@ _GLOBAL(memcpy_power7)
  7: sub r5,r5,r6
  srdi r6,r5,7
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
  li r12,64
  li r14,80
@@ -574,9 +574,9 @@ _GLOBAL(memcpy_power7)
  addi r3,r3,128
  bdnz 8b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
  /* Up to 127B to go */
  clrldi r5,r5,(64-7)
...
This diff is collapsed.
@@ -126,7 +126,7 @@ BEGIN_MMU_FTR_SECTION
  /* Set the TLB reservation and search for existing entry. Then load
  * the entry.
  */
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(R0,R16)
  ldx r14,r14,r15 /* grab pgd entry */
  beq normal_tlb_miss_done /* tlb exists already, bail */
  MMU_FTR_SECTION_ELSE
@@ -395,7 +395,7 @@ BEGIN_MMU_FTR_SECTION
  /* Set the TLB reservation and search for existing entry. Then load
  * the entry.
  */
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(R0,R16)
  ld r14,0(r10)
  beq normal_tlb_miss_done
  MMU_FTR_SECTION_ELSE
@@ -528,7 +528,7 @@ BEGIN_MMU_FTR_SECTION
  /* Search if we already have a TLB entry for that virtual address, and
  * if we do, bail out.
  */
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(R0,R16)
  beq virt_page_table_tlb_miss_done
  END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
@@ -779,7 +779,7 @@ htw_tlb_miss:
  *
  * MAS1:IND should be already set based on MAS4
  */
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(R0,R16)
  beq htw_tlb_miss_done
  /* Now, we need to walk the page tables. First check if we are in
@@ -919,7 +919,7 @@ tlb_load_linear:
  mtspr SPRN_MAS1,r15
  /* Already somebody there ? */
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(R0,R16)
  beq tlb_load_linear_done
  /* Now we build the remaining MAS. MAS0 and 2 should be fine
...
@@ -313,7 +313,7 @@ BEGIN_MMU_FTR_SECTION
  mtspr SPRN_MAS1,r4
  tlbwe
  MMU_FTR_SECTION_ELSE
- PPC_TLBILX_VA(0,r3)
+ PPC_TLBILX_VA(R0,R3)
  ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
  msync
  isync
@@ -364,7 +364,7 @@ _GLOBAL(_tlbil_va)
  beq 1f
  rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
  1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
- PPC_TLBILX_VA(0,r3)
+ PPC_TLBILX_VA(R0,R3)
  msync
  isync
  wrtee r10
@@ -379,7 +379,7 @@ _GLOBAL(_tlbivax_bcast)
  beq 1f
  rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
  1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
- PPC_TLBIVAX(0,r3)
+ PPC_TLBIVAX(R0,R3)
  eieio
  tlbsync
  sync
...
@@ -39,7 +39,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
  /* Make stackframe */
  if (ctx->seen & SEEN_DATAREF) {
  /* If we call any helpers (for loads), save LR */
- EMIT(PPC_INST_MFLR | __PPC_RT(0));
+ EMIT(PPC_INST_MFLR | __PPC_RT(R0));
  PPC_STD(0, 1, 16);
  /* Back up non-volatile regs. */
@@ -56,7 +56,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
  PPC_STD(i, 1, -(8*(32-i)));
  }
  }
- EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
+ EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
  (-BPF_PPC_STACKFRAME & 0xfffc));
  }
...
@@ -74,7 +74,7 @@ _GLOBAL(beat_hcall_norets8)
  mr r6,r7
  mr r7,r8
  mr r8,r9
- ld r10,STK_PARM(r10)(r1)
+ ld r10,STK_PARM(R10)(r1)
  HVSC /* invoke the hypervisor */
@@ -94,7 +94,7 @@ _GLOBAL(beat_hcall1)
  HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* save ret buffer */
  mr r11,r3
  mr r3,r5
@@ -108,7 +108,7 @@ _GLOBAL(beat_hcall1)
  HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  lwz r0,8(r1)
@@ -125,7 +125,7 @@ _GLOBAL(beat_hcall2)
  HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* save ret buffer */
  mr r11,r3
  mr r3,r5
@@ -139,7 +139,7 @@ _GLOBAL(beat_hcall2)
  HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
@@ -157,7 +157,7 @@ _GLOBAL(beat_hcall3)
  HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* save ret buffer */
  mr r11,r3
  mr r3,r5
@@ -171,7 +171,7 @@ _GLOBAL(beat_hcall3)
  HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
@@ -190,7 +190,7 @@ _GLOBAL(beat_hcall4)
  HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* save ret buffer */
  mr r11,r3
  mr r3,r5
@@ -204,7 +204,7 @@ _GLOBAL(beat_hcall4)
  HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
@@ -224,7 +224,7 @@ _GLOBAL(beat_hcall5)
  HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* save ret buffer */
  mr r11,r3
  mr r3,r5
@@ -238,7 +238,7 @@ _GLOBAL(beat_hcall5)
  HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
@@ -259,7 +259,7 @@ _GLOBAL(beat_hcall6)
  HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* save ret buffer */
  mr r11,r3
  mr r3,r5
@@ -273,7 +273,7 @@ _GLOBAL(beat_hcall6)
  HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
...
@@ -23,14 +23,14 @@
  _GLOBAL(opal_query_takeover)
  mfcr r0
  stw r0,8(r1)
- std r3,STK_PARAM(r3)(r1)
- std r4,STK_PARAM(r4)(r1)
+ std r3,STK_PARAM(R3)(r1)
+ std r4,STK_PARAM(R4)(r1)
  li r3,H_HAL_TAKEOVER
  li r4,H_HAL_TAKEOVER_QUERY_MAGIC
  HVSC
- ld r10,STK_PARAM(r3)(r1)
+ ld r10,STK_PARAM(R3)(r1)
  std r4,0(r10)
- ld r10,STK_PARAM(r4)(r1)
+ ld r10,STK_PARAM(R4)(r1)
  std r5,0(r10)
  lwz r0,8(r1)
  mtcrf 0xff,r0
...
@@ -32,7 +32,7 @@
  std r12,PACASAVEDMSR(r13); \
  andc r12,r12,r0; \
  mtmsrd r12,1; \
- LOAD_REG_ADDR(r0,.opal_return); \
+ LOAD_REG_ADDR(R0,.opal_return); \
  mtlr r0; \
  li r0,MSR_DR|MSR_IR; \
  andc r12,r12,r0; \
...
@@ -40,28 +40,28 @@ END_FTR_SECTION(0, 1); \
  cmpdi r12,0; \
  beq+ 1f; \
  mflr r0; \
- std r3,STK_PARM(r3)(r1); \
- std r4,STK_PARM(r4)(r1); \
- std r5,STK_PARM(r5)(r1); \
- std r6,STK_PARM(r6)(r1); \
- std r7,STK_PARM(r7)(r1); \
- std r8,STK_PARM(r8)(r1); \
- std r9,STK_PARM(r9)(r1); \
- std r10,STK_PARM(r10)(r1); \
+ std r3,STK_PARM(R3)(r1); \
+ std r4,STK_PARM(R4)(r1); \
+ std r5,STK_PARM(R5)(r1); \
+ std r6,STK_PARM(R6)(r1); \
+ std r7,STK_PARM(R7)(r1); \
+ std r8,STK_PARM(R8)(r1); \
+ std r9,STK_PARM(R9)(r1); \
+ std r10,STK_PARM(R10)(r1); \
  std r0,16(r1); \
  addi r4,r1,STK_PARM(FIRST_REG); \
  stdu r1,-STACK_FRAME_OVERHEAD(r1); \
  bl .__trace_hcall_entry; \
  addi r1,r1,STACK_FRAME_OVERHEAD; \
  ld r0,16(r1); \
- ld r3,STK_PARM(r3)(r1); \
- ld r4,STK_PARM(r4)(r1); \
- ld r5,STK_PARM(r5)(r1); \
- ld r6,STK_PARM(r6)(r1); \
- ld r7,STK_PARM(r7)(r1); \
- ld r8,STK_PARM(r8)(r1); \
- ld r9,STK_PARM(r9)(r1); \
- ld r10,STK_PARM(r10)(r1); \
+ ld r3,STK_PARM(R3)(r1); \
+ ld r4,STK_PARM(R4)(r1); \
+ ld r5,STK_PARM(R5)(r1); \
+ ld r6,STK_PARM(R6)(r1); \
+ ld r7,STK_PARM(R7)(r1); \
+ ld r8,STK_PARM(R8)(r1); \
+ ld r9,STK_PARM(R9)(r1); \
+ ld r10,STK_PARM(R10)(r1); \
  mtlr r0; \
  1:
@@ -79,8 +79,8 @@ END_FTR_SECTION(0, 1); \
  cmpdi r12,0; \
  beq+ 1f; \
  mflr r0; \
- ld r6,STK_PARM(r3)(r1); \
- std r3,STK_PARM(r3)(r1); \
+ ld r6,STK_PARM(R3)(r1); \
+ std r3,STK_PARM(R3)(r1); \
  mr r4,r3; \
  mr r3,r6; \
  std r0,16(r1); \
@@ -88,7 +88,7 @@ END_FTR_SECTION(0, 1); \
  bl .__trace_hcall_exit; \
  addi r1,r1,STACK_FRAME_OVERHEAD; \
  ld r0,16(r1); \
- ld r3,STK_PARM(r3)(r1); \
+ ld r3,STK_PARM(R3)(r1); \
  mtlr r0; \
  1:
@@ -114,7 +114,7 @@ _GLOBAL(plpar_hcall_norets)
  mfcr r0
  stw r0,8(r1)
- HCALL_INST_PRECALL(r4)
+ HCALL_INST_PRECALL(R4)
  HVSC /* invoke the hypervisor */
@@ -130,9 +130,9 @@ _GLOBAL(plpar_hcall)
  mfcr r0
  stw r0,8(r1)
- HCALL_INST_PRECALL(r5)
+ HCALL_INST_PRECALL(R5)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* Save ret buffer */
  mr r4,r5
  mr r5,r6
@@ -143,7 +143,7 @@ _GLOBAL(plpar_hcall)
  HVSC /* invoke the hypervisor */
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
@@ -168,7 +168,7 @@ _GLOBAL(plpar_hcall_raw)
  mfcr r0
  stw r0,8(r1)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* Save ret buffer */
  mr r4,r5
  mr r5,r6
@@ -179,7 +179,7 @@ _GLOBAL(plpar_hcall_raw)
  HVSC /* invoke the hypervisor */
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
@@ -196,9 +196,9 @@ _GLOBAL(plpar_hcall9)
  mfcr r0
  stw r0,8(r1)
- HCALL_INST_PRECALL(r5)
+ HCALL_INST_PRECALL(R5)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* Save ret buffer */
  mr r4,r5
  mr r5,r6
@@ -206,14 +206,14 @@ _GLOBAL(plpar_hcall9)
  mr r7,r8
  mr r8,r9
  mr r9,r10
- ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
- ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
- ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
+ ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */
+ ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */
+ ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */
  HVSC /* invoke the hypervisor */
  mr r0,r12
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
@@ -238,7 +238,7 @@ _GLOBAL(plpar_hcall9_raw)
  mfcr r0
  stw r0,8(r1)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARM(R4)(r1) /* Save ret buffer */
  mr r4,r5
  mr r5,r6
@@ -246,14 +246,14 @@ _GLOBAL(plpar_hcall9_raw)
  mr r7,r8
  mr r8,r9
  mr r9,r10
- ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
- ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
- ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
+ ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */
+ ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */
+ ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */
  HVSC /* invoke the hypervisor */
  mr r0,r12
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARM(R4)(r1)
  std r4, 0(r12)
  std r5, 8(r12)
  std r6, 16(r12)
...