Commit b18f7296 authored by Clément Léger, committed by Palmer Dabbelt

riscv: use ".L" local labels in assembly when applicable

For the sake of coherency, use local labels in assembly when
applicable. This also avoids confusing kprobes when applying a
kprobe, since the size of a function is computed by checking where the
next visible symbol is located. Non-local labels can make the computed
function size far shorter than expected, and thus cause kprobe
insertion at the specified offset to fail.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20231024132655.730417-2-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 57a4542c
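The effect described in the commit message can be illustrated with a minimal, hypothetical sketch (not part of this patch; my_func, my_func2 and fixup are made-up names). A regular label such as "fixup" is emitted into the symbol table, so a tool that derives a function's size from the distance to the next visible symbol believes the function ends there; the ".L"-prefixed form produces no symbol and leaves the whole body attributed to the enclosing function:

	/* Illustrative only: all names below are made up */
	.text
	.globl	my_func
my_func:
	/* "fixup" is a visible symbol: sizing my_func as "distance to
	 * the next symbol" makes the function appear to end here.    */
	beqz	a0, fixup
	ret
fixup:
	li	a0, -1
	ret

	.globl	my_func2
my_func2:
	/* ".Lfixup" is a local label: the assembler emits no symbol
	 * for it, so my_func2 keeps its full size.                   */
	beqz	a0, .Lfixup
	ret
.Lfixup:
	li	a0, -1
	ret

Assembling this and listing the object's symbols (for example with nm) should show "fixup" as a separate entry while ".Lfixup" is absent, which is exactly the kprobes sizing problem the patch avoids.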
@@ -26,9 +26,9 @@ SYM_CODE_START(handle_exception)
 	 * register will contain 0, and we should continue on the current TP.
 	 */
 	csrrw tp, CSR_SCRATCH, tp
-	bnez tp, _save_context
+	bnez tp, .Lsave_context

-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
 	csrr tp, CSR_SCRATCH
 	REG_S sp, TASK_TI_KERNEL_SP(tp)
@@ -40,7 +40,7 @@ _restore_kernel_tpsp:
 	REG_L sp, TASK_TI_KERNEL_SP(tp)
 #endif

-_save_context:
+.Lsave_context:
 	REG_S sp, TASK_TI_USER_SP(tp)
 	REG_L sp, TASK_TI_KERNEL_SP(tp)
 	addi sp, sp, -(PT_SIZE_ON_STACK)
@@ -164,12 +164,12 @@ secondary_start_sbi:
 	XIP_FIXUP_OFFSET a0
 	call relocate_enable_mmu
 #endif
-	call setup_trap_vector
+	call .Lsetup_trap_vector
 	tail smp_callin
 #endif /* CONFIG_SMP */

 .align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
 	/* Set trap vector to exception handler */
 	la a0, handle_exception
 	csrw CSR_TVEC, a0
@@ -206,7 +206,7 @@ ENTRY(_start_kernel)
 	 * not implement PMPs, so we set up a quick trap handler to just skip
 	 * touching the PMPs on any trap.
 	 */
-	la a0, pmp_done
+	la a0, .Lpmp_done
 	csrw CSR_TVEC, a0

 	li a0, -1
@@ -214,7 +214,7 @@ ENTRY(_start_kernel)
 	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
 	csrw CSR_PMPCFG0, a0
 .align 2
-pmp_done:
+.Lpmp_done:

 	/*
 	 * The hartid in a0 is expected later on, and we have no firmware
@@ -275,12 +275,12 @@ pmp_done:
 	/* Clear BSS for flat non-ELF images */
 	la a3, __bss_start
 	la a4, __bss_stop
-	ble a4, a3, clear_bss_done
-clear_bss:
+	ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
 	REG_S zero, (a3)
 	add a3, a3, RISCV_SZPTR
-	blt a3, a4, clear_bss
-clear_bss_done:
+	blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
 #endif
 	la a2, boot_cpu_hartid
 	XIP_FIXUP_OFFSET a2
@@ -305,7 +305,7 @@ clear_bss_done:
 	call relocate_enable_mmu
 #endif /* CONFIG_MMU */

-	call setup_trap_vector
+	call .Lsetup_trap_vector
 	/* Restore C environment */
 	la tp, init_task
 	la sp, init_thread_union + THREAD_SIZE
@@ -85,16 +85,16 @@ ENTRY(MCOUNT_NAME)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	la t0, ftrace_graph_return
 	REG_L t1, 0(t0)
-	bne t1, t4, do_ftrace_graph_caller
+	bne t1, t4, .Ldo_ftrace_graph_caller

 	la t3, ftrace_graph_entry
 	REG_L t2, 0(t3)
 	la t6, ftrace_graph_entry_stub
-	bne t2, t6, do_ftrace_graph_caller
+	bne t2, t6, .Ldo_ftrace_graph_caller
 #endif

 	la t3, ftrace_trace_function
 	REG_L t5, 0(t3)
-	bne t5, t4, do_trace
+	bne t5, t4, .Ldo_trace
 	ret

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME)
 	 * A pseudo representation for the function graph tracer:
 	 * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
 	 */
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
 	addi a0, s0, -SZREG
 	mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
 	 * A pseudo representation for the function tracer:
 	 * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
 	 */
-do_trace:
+.Ldo_trace:
 	REG_L a1, -SZREG(s0)
 	mv a0, ra
@@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
 	 */

 	/* Return if nothing to do */
-	beq a0, a1, return_from_memmove
-	beqz a2, return_from_memmove
+	beq a0, a1, .Lreturn_from_memmove
+	beqz a2, .Lreturn_from_memmove

 	/*
 	 * Register Uses
@@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
 	 * small enough not to bother.
 	 */
 	andi t0, a2, -(2 * SZREG)
-	beqz t0, byte_copy
+	beqz t0, .Lbyte_copy

 	/*
 	 * Now solve for t5 and t6.
@@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 	xor t0, a0, a1
 	andi t1, t0, (SZREG - 1)
-	beqz t1, coaligned_copy
+	beqz t1, .Lcoaligned_copy
 	/* Fall through to misaligned fixup copy */

-misaligned_fixup_copy:
-	bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+	bltu a1, a0, .Lmisaligned_fixup_copy_reverse

-misaligned_fixup_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward

 	andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
 	mv t3, t6 /* Fix the dest pointer in case the loop was broken */

 	add a1, t3, a5 /* Restore the src pointer */
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */

-misaligned_fixup_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse

 	andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
 	mv t4, t5 /* Fix the dest pointer in case the loop was broken */

 	add a4, t4, a5 /* Restore the src pointer */
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */

 /*
  * Simple copy loops for SZREG co-aligned memory locations.
  * These also make calls to do byte copies for any unaligned
  * data at their terminations.
  */
-coaligned_copy:
-	bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+	bltu a1, a0, .Lcoaligned_copy_reverse

-coaligned_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward

 1:
 	REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +235,10 @@ coaligned_copy_forward:
 	REG_S t1, (-1 * SZREG)(t3)
 	bne t3, t6, 1b

-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */

-coaligned_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse

 1:
 	REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +247,7 @@ coaligned_copy_reverse:
 	REG_S t1, ( 0 * SZREG)(t4)
 	bne t4, t5, 1b

-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */

 /*
  * These are basically sub-functions within the function. They
@@ -258,7 +258,7 @@ coaligned_copy_reverse:
  * up from where they were left and we avoid code duplication
  * without any overhead except the call in and return jumps.
  */
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
 	beq t3, t5, 2f
 1:
 	lb t1, 0(a1)
@@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
 2:
 	jalr zero, 0x0(t0) /* Return to multibyte copy loop */

-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
 	beq t4, t6, 2f
 1:
 	lb t1, -1(a4)
@@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
  * These will byte copy until they reach the end of data to copy.
  * At that point, they will call to return from memmove.
  */
-byte_copy:
-	bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+	bltu a1, a0, .Lbyte_copy_reverse

-byte_copy_forward:
+.Lbyte_copy_forward:
 	beq t3, t4, 2f
 1:
 	lb t1, 0(a1)
@@ -299,7 +299,7 @@ byte_copy_forward:
 2:
 	ret

-byte_copy_reverse:
+.Lbyte_copy_reverse:
 	beq t4, t3, 2f
 1:
 	lb t1, -1(a4)
@@ -309,7 +309,7 @@ byte_copy_reverse:
 	bne t4, t3, 1b
 2:

-return_from_memmove:
+.Lreturn_from_memmove:
 	ret

 SYM_FUNC_END(memmove)