Commit 8a583c0a authored by Andreas Schwab, committed by Michael Ellerman

powerpc: Fix invalid use of register expressions

binutils >= 2.26 now warns about misuse of register expressions in
assembler operands that are actually literals, for example:

  arch/powerpc/kernel/entry_64.S:535: Warning: invalid register expression

In practice these are almost all uses of r0 that should just be a
literal 0.
Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
[mpe: Mention r0 is almost always the culprit, fold in purgatory change]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 21a0e8c1
...@@ -439,7 +439,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) ...@@ -439,7 +439,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.machine push ; \ .machine push ; \
.machine "power4" ; \ .machine "power4" ; \
lis scratch,0x60000000@h; \ lis scratch,0x60000000@h; \
dcbt r0,scratch,0b01010; \ dcbt 0,scratch,0b01010; \
.machine pop .machine pop
/* /*
......
...@@ -179,7 +179,7 @@ nothing_to_copy: ...@@ -179,7 +179,7 @@ nothing_to_copy:
sld r3, r3, r0 sld r3, r3, r0
li r0, 0 li r0, 0
1: 1:
dcbf r0,r3 dcbf 0,r3
addi r3,r3,0x20 addi r3,r3,0x20
bdnz 1b bdnz 1b
......
...@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7) ...@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
.machine push .machine push
.machine "power4" .machine "power4"
/* setup read stream 0 */ /* setup read stream 0 */
dcbt r0,r4,0b01000 /* addr from */ dcbt 0,r4,0b01000 /* addr from */
dcbt r0,r7,0b01010 /* length and depth from */ dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */ /* setup write stream 1 */
dcbtst r0,r9,0b01000 /* addr to */ dcbtst 0,r9,0b01000 /* addr to */
dcbtst r0,r10,0b01010 /* length and depth to */ dcbtst 0,r10,0b01010 /* length and depth to */
eieio eieio
dcbt r0,r8,0b01010 /* all streams GO */ dcbt 0,r8,0b01010 /* all streams GO */
.machine pop .machine pop
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
...@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7) ...@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
li r12,112 li r12,112
.align 5 .align 5
1: lvx v7,r0,r4 1: lvx v7,0,r4
lvx v6,r4,r6 lvx v6,r4,r6
lvx v5,r4,r7 lvx v5,r4,r7
lvx v4,r4,r8 lvx v4,r4,r8
...@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7) ...@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
lvx v1,r4,r11 lvx v1,r4,r11
lvx v0,r4,r12 lvx v0,r4,r12
addi r4,r4,128 addi r4,r4,128
stvx v7,r0,r3 stvx v7,0,r3
stvx v6,r3,r6 stvx v6,r3,r6
stvx v5,r3,r7 stvx v5,r3,r7
stvx v4,r3,r8 stvx v4,r3,r8
......
...@@ -315,13 +315,13 @@ err1; stb r0,0(r3) ...@@ -315,13 +315,13 @@ err1; stb r0,0(r3)
.machine push .machine push
.machine "power4" .machine "power4"
/* setup read stream 0 */ /* setup read stream 0 */
dcbt r0,r6,0b01000 /* addr from */ dcbt 0,r6,0b01000 /* addr from */
dcbt r0,r7,0b01010 /* length and depth from */ dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */ /* setup write stream 1 */
dcbtst r0,r9,0b01000 /* addr to */ dcbtst 0,r9,0b01000 /* addr to */
dcbtst r0,r10,0b01010 /* length and depth to */ dcbtst 0,r10,0b01010 /* length and depth to */
eieio eieio
dcbt r0,r8,0b01010 /* all streams GO */ dcbt 0,r8,0b01010 /* all streams GO */
.machine pop .machine pop
beq cr1,.Lunwind_stack_nonvmx_copy beq cr1,.Lunwind_stack_nonvmx_copy
...@@ -376,26 +376,26 @@ err3; std r0,0(r3) ...@@ -376,26 +376,26 @@ err3; std r0,0(r3)
li r11,48 li r11,48
bf cr7*4+3,5f bf cr7*4+3,5f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
addi r4,r4,16 addi r4,r4,16
err3; stvx v1,r0,r3 err3; stvx v1,0,r3
addi r3,r3,16 addi r3,r3,16
5: bf cr7*4+2,6f 5: bf cr7*4+2,6f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
err3; lvx v0,r4,r9 err3; lvx v0,r4,r9
addi r4,r4,32 addi r4,r4,32
err3; stvx v1,r0,r3 err3; stvx v1,0,r3
err3; stvx v0,r3,r9 err3; stvx v0,r3,r9
addi r3,r3,32 addi r3,r3,32
6: bf cr7*4+1,7f 6: bf cr7*4+1,7f
err3; lvx v3,r0,r4 err3; lvx v3,0,r4
err3; lvx v2,r4,r9 err3; lvx v2,r4,r9
err3; lvx v1,r4,r10 err3; lvx v1,r4,r10
err3; lvx v0,r4,r11 err3; lvx v0,r4,r11
addi r4,r4,64 addi r4,r4,64
err3; stvx v3,r0,r3 err3; stvx v3,0,r3
err3; stvx v2,r3,r9 err3; stvx v2,r3,r9
err3; stvx v1,r3,r10 err3; stvx v1,r3,r10
err3; stvx v0,r3,r11 err3; stvx v0,r3,r11
...@@ -421,7 +421,7 @@ err3; stvx v0,r3,r11 ...@@ -421,7 +421,7 @@ err3; stvx v0,r3,r11
*/ */
.align 5 .align 5
8: 8:
err4; lvx v7,r0,r4 err4; lvx v7,0,r4
err4; lvx v6,r4,r9 err4; lvx v6,r4,r9
err4; lvx v5,r4,r10 err4; lvx v5,r4,r10
err4; lvx v4,r4,r11 err4; lvx v4,r4,r11
...@@ -430,7 +430,7 @@ err4; lvx v2,r4,r14 ...@@ -430,7 +430,7 @@ err4; lvx v2,r4,r14
err4; lvx v1,r4,r15 err4; lvx v1,r4,r15
err4; lvx v0,r4,r16 err4; lvx v0,r4,r16
addi r4,r4,128 addi r4,r4,128
err4; stvx v7,r0,r3 err4; stvx v7,0,r3
err4; stvx v6,r3,r9 err4; stvx v6,r3,r9
err4; stvx v5,r3,r10 err4; stvx v5,r3,r10
err4; stvx v4,r3,r11 err4; stvx v4,r3,r11
...@@ -451,29 +451,29 @@ err4; stvx v0,r3,r16 ...@@ -451,29 +451,29 @@ err4; stvx v0,r3,r16
mtocrf 0x01,r6 mtocrf 0x01,r6
bf cr7*4+1,9f bf cr7*4+1,9f
err3; lvx v3,r0,r4 err3; lvx v3,0,r4
err3; lvx v2,r4,r9 err3; lvx v2,r4,r9
err3; lvx v1,r4,r10 err3; lvx v1,r4,r10
err3; lvx v0,r4,r11 err3; lvx v0,r4,r11
addi r4,r4,64 addi r4,r4,64
err3; stvx v3,r0,r3 err3; stvx v3,0,r3
err3; stvx v2,r3,r9 err3; stvx v2,r3,r9
err3; stvx v1,r3,r10 err3; stvx v1,r3,r10
err3; stvx v0,r3,r11 err3; stvx v0,r3,r11
addi r3,r3,64 addi r3,r3,64
9: bf cr7*4+2,10f 9: bf cr7*4+2,10f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
err3; lvx v0,r4,r9 err3; lvx v0,r4,r9
addi r4,r4,32 addi r4,r4,32
err3; stvx v1,r0,r3 err3; stvx v1,0,r3
err3; stvx v0,r3,r9 err3; stvx v0,r3,r9
addi r3,r3,32 addi r3,r3,32
10: bf cr7*4+3,11f 10: bf cr7*4+3,11f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
addi r4,r4,16 addi r4,r4,16
err3; stvx v1,r0,r3 err3; stvx v1,0,r3
addi r3,r3,16 addi r3,r3,16
/* Up to 15B to go */ /* Up to 15B to go */
...@@ -553,25 +553,25 @@ err3; lvx v0,0,r4 ...@@ -553,25 +553,25 @@ err3; lvx v0,0,r4
addi r4,r4,16 addi r4,r4,16
bf cr7*4+3,5f bf cr7*4+3,5f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
addi r4,r4,16 addi r4,r4,16
err3; stvx v8,r0,r3 err3; stvx v8,0,r3
addi r3,r3,16 addi r3,r3,16
vor v0,v1,v1 vor v0,v1,v1
5: bf cr7*4+2,6f 5: bf cr7*4+2,6f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9 err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16) VPERM(v9,v1,v0,v16)
addi r4,r4,32 addi r4,r4,32
err3; stvx v8,r0,r3 err3; stvx v8,0,r3
err3; stvx v9,r3,r9 err3; stvx v9,r3,r9
addi r3,r3,32 addi r3,r3,32
6: bf cr7*4+1,7f 6: bf cr7*4+1,7f
err3; lvx v3,r0,r4 err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16) VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9 err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16) VPERM(v9,v3,v2,v16)
...@@ -580,7 +580,7 @@ err3; lvx v1,r4,r10 ...@@ -580,7 +580,7 @@ err3; lvx v1,r4,r10
err3; lvx v0,r4,r11 err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16) VPERM(v11,v1,v0,v16)
addi r4,r4,64 addi r4,r4,64
err3; stvx v8,r0,r3 err3; stvx v8,0,r3
err3; stvx v9,r3,r9 err3; stvx v9,r3,r9
err3; stvx v10,r3,r10 err3; stvx v10,r3,r10
err3; stvx v11,r3,r11 err3; stvx v11,r3,r11
...@@ -606,7 +606,7 @@ err3; stvx v11,r3,r11 ...@@ -606,7 +606,7 @@ err3; stvx v11,r3,r11
*/ */
.align 5 .align 5
8: 8:
err4; lvx v7,r0,r4 err4; lvx v7,0,r4
VPERM(v8,v0,v7,v16) VPERM(v8,v0,v7,v16)
err4; lvx v6,r4,r9 err4; lvx v6,r4,r9
VPERM(v9,v7,v6,v16) VPERM(v9,v7,v6,v16)
...@@ -623,7 +623,7 @@ err4; lvx v1,r4,r15 ...@@ -623,7 +623,7 @@ err4; lvx v1,r4,r15
err4; lvx v0,r4,r16 err4; lvx v0,r4,r16
VPERM(v15,v1,v0,v16) VPERM(v15,v1,v0,v16)
addi r4,r4,128 addi r4,r4,128
err4; stvx v8,r0,r3 err4; stvx v8,0,r3
err4; stvx v9,r3,r9 err4; stvx v9,r3,r9
err4; stvx v10,r3,r10 err4; stvx v10,r3,r10
err4; stvx v11,r3,r11 err4; stvx v11,r3,r11
...@@ -644,7 +644,7 @@ err4; stvx v15,r3,r16 ...@@ -644,7 +644,7 @@ err4; stvx v15,r3,r16
mtocrf 0x01,r6 mtocrf 0x01,r6
bf cr7*4+1,9f bf cr7*4+1,9f
err3; lvx v3,r0,r4 err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16) VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9 err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16) VPERM(v9,v3,v2,v16)
...@@ -653,27 +653,27 @@ err3; lvx v1,r4,r10 ...@@ -653,27 +653,27 @@ err3; lvx v1,r4,r10
err3; lvx v0,r4,r11 err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16) VPERM(v11,v1,v0,v16)
addi r4,r4,64 addi r4,r4,64
err3; stvx v8,r0,r3 err3; stvx v8,0,r3
err3; stvx v9,r3,r9 err3; stvx v9,r3,r9
err3; stvx v10,r3,r10 err3; stvx v10,r3,r10
err3; stvx v11,r3,r11 err3; stvx v11,r3,r11
addi r3,r3,64 addi r3,r3,64
9: bf cr7*4+2,10f 9: bf cr7*4+2,10f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9 err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16) VPERM(v9,v1,v0,v16)
addi r4,r4,32 addi r4,r4,32
err3; stvx v8,r0,r3 err3; stvx v8,0,r3
err3; stvx v9,r3,r9 err3; stvx v9,r3,r9
addi r3,r3,32 addi r3,r3,32
10: bf cr7*4+3,11f 10: bf cr7*4+3,11f
err3; lvx v1,r0,r4 err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
addi r4,r4,16 addi r4,r4,16
err3; stvx v8,r0,r3 err3; stvx v8,0,r3
addi r3,r3,16 addi r3,r3,16
/* Up to 15B to go */ /* Up to 15B to go */
......
...@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7) ...@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)
.machine push .machine push
.machine "power4" .machine "power4"
dcbt r0,r6,0b01000 dcbt 0,r6,0b01000
dcbt r0,r7,0b01010 dcbt 0,r7,0b01010
dcbtst r0,r9,0b01000 dcbtst 0,r9,0b01000
dcbtst r0,r10,0b01010 dcbtst 0,r10,0b01010
eieio eieio
dcbt r0,r8,0b01010 /* GO */ dcbt 0,r8,0b01010 /* GO */
.machine pop .machine pop
beq cr1,.Lunwind_stack_nonvmx_copy beq cr1,.Lunwind_stack_nonvmx_copy
...@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7) ...@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
li r11,48 li r11,48
bf cr7*4+3,5f bf cr7*4+3,5f
lvx v1,r0,r4 lvx v1,0,r4
addi r4,r4,16 addi r4,r4,16
stvx v1,r0,r3 stvx v1,0,r3
addi r3,r3,16 addi r3,r3,16
5: bf cr7*4+2,6f 5: bf cr7*4+2,6f
lvx v1,r0,r4 lvx v1,0,r4
lvx v0,r4,r9 lvx v0,r4,r9
addi r4,r4,32 addi r4,r4,32
stvx v1,r0,r3 stvx v1,0,r3
stvx v0,r3,r9 stvx v0,r3,r9
addi r3,r3,32 addi r3,r3,32
6: bf cr7*4+1,7f 6: bf cr7*4+1,7f
lvx v3,r0,r4 lvx v3,0,r4
lvx v2,r4,r9 lvx v2,r4,r9
lvx v1,r4,r10 lvx v1,r4,r10
lvx v0,r4,r11 lvx v0,r4,r11
addi r4,r4,64 addi r4,r4,64
stvx v3,r0,r3 stvx v3,0,r3
stvx v2,r3,r9 stvx v2,r3,r9
stvx v1,r3,r10 stvx v1,r3,r10
stvx v0,r3,r11 stvx v0,r3,r11
...@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7) ...@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
*/ */
.align 5 .align 5
8: 8:
lvx v7,r0,r4 lvx v7,0,r4
lvx v6,r4,r9 lvx v6,r4,r9
lvx v5,r4,r10 lvx v5,r4,r10
lvx v4,r4,r11 lvx v4,r4,r11
...@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7) ...@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
lvx v1,r4,r15 lvx v1,r4,r15
lvx v0,r4,r16 lvx v0,r4,r16
addi r4,r4,128 addi r4,r4,128
stvx v7,r0,r3 stvx v7,0,r3
stvx v6,r3,r9 stvx v6,r3,r9
stvx v5,r3,r10 stvx v5,r3,r10
stvx v4,r3,r11 stvx v4,r3,r11
...@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7) ...@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6 mtocrf 0x01,r6
bf cr7*4+1,9f bf cr7*4+1,9f
lvx v3,r0,r4 lvx v3,0,r4
lvx v2,r4,r9 lvx v2,r4,r9
lvx v1,r4,r10 lvx v1,r4,r10
lvx v0,r4,r11 lvx v0,r4,r11
addi r4,r4,64 addi r4,r4,64
stvx v3,r0,r3 stvx v3,0,r3
stvx v2,r3,r9 stvx v2,r3,r9
stvx v1,r3,r10 stvx v1,r3,r10
stvx v0,r3,r11 stvx v0,r3,r11
addi r3,r3,64 addi r3,r3,64
9: bf cr7*4+2,10f 9: bf cr7*4+2,10f
lvx v1,r0,r4 lvx v1,0,r4
lvx v0,r4,r9 lvx v0,r4,r9
addi r4,r4,32 addi r4,r4,32
stvx v1,r0,r3 stvx v1,0,r3
stvx v0,r3,r9 stvx v0,r3,r9
addi r3,r3,32 addi r3,r3,32
10: bf cr7*4+3,11f 10: bf cr7*4+3,11f
lvx v1,r0,r4 lvx v1,0,r4
addi r4,r4,16 addi r4,r4,16
stvx v1,r0,r3 stvx v1,0,r3
addi r3,r3,16 addi r3,r3,16
/* Up to 15B to go */ /* Up to 15B to go */
...@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7) ...@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
addi r4,r4,16 addi r4,r4,16
bf cr7*4+3,5f bf cr7*4+3,5f
lvx v1,r0,r4 lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
addi r4,r4,16 addi r4,r4,16
stvx v8,r0,r3 stvx v8,0,r3
addi r3,r3,16 addi r3,r3,16
vor v0,v1,v1 vor v0,v1,v1
5: bf cr7*4+2,6f 5: bf cr7*4+2,6f
lvx v1,r0,r4 lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
lvx v0,r4,r9 lvx v0,r4,r9
VPERM(v9,v1,v0,v16) VPERM(v9,v1,v0,v16)
addi r4,r4,32 addi r4,r4,32
stvx v8,r0,r3 stvx v8,0,r3
stvx v9,r3,r9 stvx v9,r3,r9
addi r3,r3,32 addi r3,r3,32
6: bf cr7*4+1,7f 6: bf cr7*4+1,7f
lvx v3,r0,r4 lvx v3,0,r4
VPERM(v8,v0,v3,v16) VPERM(v8,v0,v3,v16)
lvx v2,r4,r9 lvx v2,r4,r9
VPERM(v9,v3,v2,v16) VPERM(v9,v3,v2,v16)
...@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7) ...@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r11 lvx v0,r4,r11
VPERM(v11,v1,v0,v16) VPERM(v11,v1,v0,v16)
addi r4,r4,64 addi r4,r4,64
stvx v8,r0,r3 stvx v8,0,r3
stvx v9,r3,r9 stvx v9,r3,r9
stvx v10,r3,r10 stvx v10,r3,r10
stvx v11,r3,r11 stvx v11,r3,r11
...@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7) ...@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
*/ */
.align 5 .align 5
8: 8:
lvx v7,r0,r4 lvx v7,0,r4
VPERM(v8,v0,v7,v16) VPERM(v8,v0,v7,v16)
lvx v6,r4,r9 lvx v6,r4,r9
VPERM(v9,v7,v6,v16) VPERM(v9,v7,v6,v16)
...@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7) ...@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r16 lvx v0,r4,r16
VPERM(v15,v1,v0,v16) VPERM(v15,v1,v0,v16)
addi r4,r4,128 addi r4,r4,128
stvx v8,r0,r3 stvx v8,0,r3
stvx v9,r3,r9 stvx v9,r3,r9
stvx v10,r3,r10 stvx v10,r3,r10
stvx v11,r3,r11 stvx v11,r3,r11
...@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7) ...@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6 mtocrf 0x01,r6
bf cr7*4+1,9f bf cr7*4+1,9f
lvx v3,r0,r4 lvx v3,0,r4
VPERM(v8,v0,v3,v16) VPERM(v8,v0,v3,v16)
lvx v2,r4,r9 lvx v2,r4,r9
VPERM(v9,v3,v2,v16) VPERM(v9,v3,v2,v16)
...@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7) ...@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r11 lvx v0,r4,r11
VPERM(v11,v1,v0,v16) VPERM(v11,v1,v0,v16)
addi r4,r4,64 addi r4,r4,64
stvx v8,r0,r3 stvx v8,0,r3
stvx v9,r3,r9 stvx v9,r3,r9
stvx v10,r3,r10 stvx v10,r3,r10
stvx v11,r3,r11 stvx v11,r3,r11
addi r3,r3,64 addi r3,r3,64
9: bf cr7*4+2,10f 9: bf cr7*4+2,10f
lvx v1,r0,r4 lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
lvx v0,r4,r9 lvx v0,r4,r9
VPERM(v9,v1,v0,v16) VPERM(v9,v1,v0,v16)
addi r4,r4,32 addi r4,r4,32
stvx v8,r0,r3 stvx v8,0,r3
stvx v9,r3,r9 stvx v9,r3,r9
addi r3,r3,32 addi r3,r3,32
10: bf cr7*4+3,11f 10: bf cr7*4+3,11f
lvx v1,r0,r4 lvx v1,0,r4
VPERM(v8,v0,v1,v16) VPERM(v8,v0,v1,v16)
addi r4,r4,16 addi r4,r4,16
stvx v8,r0,r3 stvx v8,0,r3
addi r3,r3,16 addi r3,r3,16
/* Up to 15B to go */ /* Up to 15B to go */
......
...@@ -184,7 +184,7 @@ err1; std r0,8(r3) ...@@ -184,7 +184,7 @@ err1; std r0,8(r3)
mtctr r6 mtctr r6
mr r8,r3 mr r8,r3
14: 14:
err1; dcbz r0,r3 err1; dcbz 0,r3
add r3,r3,r9 add r3,r3,r9
bdnz 14b bdnz 14b
......
...@@ -67,7 +67,7 @@ master: ...@@ -67,7 +67,7 @@ master:
mr %r16,%r3 /* save dt address in reg16 */ mr %r16,%r3 /* save dt address in reg16 */
li %r4,20 li %r4,20
LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */ LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */
cmpwi %r0,%r6,2 /* v2 or later? */ cmpwi %cr0,%r6,2 /* v2 or later? */
blt 1f blt 1f
li %r4,28 li %r4,28
STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */ STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment