Commit 07b48069 authored by WANG Xuerui, committed by Huacai Chen

LoongArch: Use the "jr" pseudo-instruction where applicable

Some of the assembly code in the LoongArch port likely originated
from a time when the assembler did not support pseudo-instructions like
"move" or "jr", so the desugared form was used and readability suffers
(to a minor degree) as a result.

As the upstream toolchain has supported these pseudo-instructions from the
beginning, migrate the few existing usages to them for better readability.

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent d8e7f201
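
For context, the two pseudo-instructions named in the message are plain assembler aliases: they assemble to the same machine instructions as the desugared forms, so only the source-level spelling changes. A minimal illustration of the expansions (a sketch based on the LoongArch assembler conventions, not part of the patch itself):

	jr	ra		# pseudo-instruction; assembles as: jirl	zero, ra, 0
	jr	t0		# pseudo-instruction; assembles as: jirl	zero, t0, 0
	move	t0, a0		# pseudo-instruction; assembles as: or	t0, a0, zero
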
@@ -153,7 +153,7 @@ SYM_FUNC_START(_save_fp)
 	fpu_save_csr	a0 t1
 	fpu_save_double	a0 t1		# clobbers t1
 	fpu_save_cc	a0 t1 t2	# clobbers t1, t2
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(_save_fp)
 EXPORT_SYMBOL(_save_fp)
@@ -164,7 +164,7 @@ SYM_FUNC_START(_restore_fp)
 	fpu_restore_double	a0 t1	# clobbers t1
 	fpu_restore_csr	a0 t1
 	fpu_restore_cc	a0 t1 t2	# clobbers t1, t2
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(_restore_fp)
 /*
@@ -216,7 +216,7 @@ SYM_FUNC_START(_init_fpu)
 	movgr2fr.d	$f30, t1
 	movgr2fr.d	$f31, t1
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(_init_fpu)
 /*
@@ -229,7 +229,7 @@ SYM_FUNC_START(_save_fp_context)
 	sc_save_fcsr	a2 t1
 	sc_save_fp	a0
 	li.w	a0, 0				# success
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(_save_fp_context)
 /*
@@ -242,10 +242,10 @@ SYM_FUNC_START(_restore_fp_context)
 	sc_restore_fcc	a1 t1 t2
 	sc_restore_fcsr	a2 t1
 	li.w	a0, 0				# success
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(_restore_fp_context)
 SYM_FUNC_START(fault)
 	li.w	a0, -EFAULT			# failure
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(fault)
@@ -28,7 +28,7 @@ SYM_FUNC_START(__arch_cpu_idle)
 	nop
 	idle	0
 	/* end of rollback region */
-1:	jirl	zero, ra, 0
+1:	jr	ra
 SYM_FUNC_END(__arch_cpu_idle)
 SYM_FUNC_START(handle_vint)
@@ -91,5 +91,5 @@ SYM_FUNC_END(except_vec_cex)
 SYM_FUNC_START(handle_sys)
 	la.abs	t0, handle_syscall
-	jirl	zero, t0, 0
+	jr	t0
 SYM_FUNC_END(handle_sys)
@@ -32,7 +32,7 @@ SYM_CODE_START(kernel_entry)		# kernel entry point
 	/* We might not get launched at the address the kernel is linked to,
 	   so we jump there. */
 	la.abs	t0, 0f
-	jirl	zero, t0, 0
+	jr	t0
 0:
 	la	t0, __bss_start		# clear .bss
 	st.d	zero, t0, 0
@@ -86,7 +86,7 @@ SYM_CODE_START(smpboot_entry)
 	ld.d	tp, t0, CPU_BOOT_TINFO
 	la.abs	t0, 0f
-	jirl	zero, t0, 0
+	jr	t0
 0:
 	bl	start_secondary
 SYM_CODE_END(smpboot_entry)
@@ -32,7 +32,7 @@ SYM_FUNC_START(clear_page)
 	st.d	zero, a0, -8
 	bne	t0, a0, 1b
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(clear_page)
 EXPORT_SYMBOL(clear_page)
@@ -79,6 +79,6 @@ SYM_FUNC_START(copy_page)
 	st.d	t7, a0, -8
 	bne	t8, a0, 1b
-	jirl	zero, ra, 0
+	jr	ra
 SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
@@ -198,7 +198,7 @@ nopage_tlb_load:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_0
-	jirl	zero, t0, 0
+	jr	t0
 SYM_FUNC_END(handle_tlb_load)
 SYM_FUNC_START(handle_tlb_store)
@@ -366,7 +366,7 @@ nopage_tlb_store:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_1
-	jirl	zero, t0, 0
+	jr	t0
 SYM_FUNC_END(handle_tlb_store)
 SYM_FUNC_START(handle_tlb_modify)
@@ -525,7 +525,7 @@ nopage_tlb_modify:
 	dbar	0
 	csrrd	ra, EXCEPTION_KS2
 	la.abs	t0, tlb_do_page_fault_1
-	jirl	zero, t0, 0
+	jr	t0
 SYM_FUNC_END(handle_tlb_modify)
 SYM_FUNC_START(handle_tlb_refill)