Commit a7ec177e authored by Andreas Larsson

sparc32: Fix truncated relocation errors when linking large kernels

Use jumps instead of branches when jumping from one section to another,
to avoid branches to addresses farther away than a 22-bit offset can
handle. Such branches result in errors such as

arch/sparc/kernel/signal_32.o:(.fixup+0x0): relocation truncated to fit: R_SPARC_WDISP22 against `.text'

This is the same approach that was taken for sparc64 in commit
52eb053b ("[SPARC64]: Fix linkage of enormous kernels.")
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202405080936.tWaJdO3P-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202406240441.5zaoshVX-lkp@intel.com/
Link: https://lore.kernel.org/r/20240710092341.457591-1-andreas@gaisler.com
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
parent e51f125b
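For background: a SPARC branch instruction encodes a signed 22-bit word displacement, so it can only reach targets within about +/-8 MB of the branch itself, while a sethi/jmpl pair materializes a full 32-bit absolute address at the cost of one scratch register. A minimal standalone sketch of the two forms follows; the labels are illustrative, not taken from the patch:

	! A branch displacement is 22 bits of words: +/- 2^21 instructions,
	! i.e. roughly +/- 8 MB. sethi/jmpl reaches any 32-bit address.
	.text
near_jump:
	b	target			! emits R_SPARC_WDISP22; the link fails
	 nop				! if `target` ends up > 8 MB away

far_jump:
	sethi	%hi(target), %g1	! upper 22 bits of target's address
	jmpl	%g1 + %lo(target), %g0	! add the low 10 bits; %g0 discards the
	 nop				! return address, making this a plain jump

target:
	retl
	 nop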
@@ -95,7 +95,8 @@ __asm__ __volatile__( \
 	".section .fixup,#alloc,#execinstr\n\t"	\
 	".align	4\n"				\
 	"3:\n\t"				\
-	"b	2b\n\t"				\
+	"sethi	%%hi(2b), %0\n\t"		\
+	"jmpl	%0 + %%lo(2b), %%g0\n\t"	\
 	" mov	%3, %0\n\t"			\
 	".previous\n\n\t"			\
 	".section __ex_table,#alloc\n\t"	\
@@ -163,8 +164,9 @@ __asm__ __volatile__( \
 	".section .fixup,#alloc,#execinstr\n\t"	\
 	".align	4\n"				\
 	"3:\n\t"				\
+	"sethi	%%hi(2b), %0\n\t"		\
 	"clr	%1\n\t"				\
-	"b	2b\n\t"				\
+	"jmpl	%0 + %%lo(2b), %%g0\n\t"	\
 	" mov	%3, %0\n\n\t"			\
 	".previous\n\t"				\
 	".section __ex_table,#alloc\n\t"	\
...
@@ -118,9 +118,12 @@ current_pc:
 	mov	%o7, %g3
 
 	tst	%o0
-	be	no_sun4u_here
+	bne	2f
 	 mov	%g4, %o7		/* Previous %o7. */
-
+	sethi	%hi(no_sun4u_here), %l1
+	jmpl	%l1 + %lo(no_sun4u_here), %g0
+	 nop
+2:
 	mov	%o0, %l0		! stash away romvec
 	mov	%o0, %g7		! put it here too
 	mov	%o1, %l1		! stash away debug_vec too
@@ -195,7 +198,8 @@ halt_notsup:
 	sub	%o0, %l6, %o0
 	call	%o1
 	 nop
-	ba	halt_me
+	sethi	%hi(halt_me), %o0
+	jmpl	%o0 + %lo(halt_me), %g0
 	 nop
 
 not_a_sun4:
@@ -431,8 +435,11 @@ leon_init:
 #ifdef CONFIG_SMP
 	ldub	[%g2 + %lo(boot_cpu_id)], %g1
 	cmp	%g1, 0xff		! unset means first CPU
-	bne	leon_smp_cpu_startup	! continue only with master
+	be	1f
+	sethi	%hi(leon_smp_cpu_startup), %g1
+	jmpl	%g1 + %lo(leon_smp_cpu_startup), %g0
 	 nop
+1:
 #endif
 	/* Get CPU-ID from most significant 4-bit of ASR17 */
 	rd	%asr17, %g1
...
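The head_32.S hunks all use the same idiom: a conditional branch to a possibly distant label becomes the inverted condition branching over a local sethi/jmpl trampoline. A self-contained sketch of that transformation, with illustrative label and register choices:

	.text
check:
	cmp	%g1, 0xff
	be	1f			! inverted test skips the trampoline;
	 nop				! the old code was `bne far_label`
	sethi	%hi(far_label), %g1	! clobbers %g1, so it must be dead here
	jmpl	%g1 + %lo(far_label), %g0
	 nop				! delay slot
1:	retl
	 nop

far_label:				! may live in another section, any distance away
	retl
	 nop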