Commit 57ce5d3e authored by WANG Xuerui, committed by Huacai Chen

LoongArch: Use the "move" pseudo-instruction where applicable

Some of the assembly code in the LoongArch port likely originated
from a time when the assembler did not support pseudo-instructions like
"move" or "jr", so the desugared form was used and readability suffers
(to a minor degree) as a result.

As the upstream toolchain has supported these pseudo-instructions from
the beginning, migrate the few existing usages to them for better
readability.
Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 07b48069
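
For reference, the change is purely cosmetic: on LoongArch, "move rd, rj" is an
assembler pseudo-instruction that expands to "or rd, rj, $zero", so both
spellings produce the same encoding. A minimal illustration (the registers
$a0/$a1 are chosen arbitrarily for this example, not taken from the patch):

    # Both lines assemble to the identical instruction encoding;
    # "move" is simply the more readable spelling.
    or      $a0, $a1, $zero    # desugared form
    move    $a0, $a1           # equivalent pseudo-instruction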
@@ -157,7 +157,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
__asm__ __volatile__(
"1: ll.w %1, %2 # atomic_sub_if_positive\n"
" addi.w %0, %1, %3 \n"
" or %1, %0, $zero \n"
" move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
@@ -170,7 +170,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
__asm__ __volatile__(
"1: ll.w %1, %2 # atomic_sub_if_positive\n"
" sub.w %0, %1, %3 \n"
" or %1, %0, $zero \n"
" move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
@@ -320,7 +320,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
__asm__ __volatile__(
"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
" addi.d %0, %1, %3 \n"
" or %1, %0, $zero \n"
" move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
@@ -333,7 +333,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
__asm__ __volatile__(
"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
" sub.d %0, %1, %3 \n"
" or %1, %0, $zero \n"
" move %1, %0 \n"
" blt %0, $zero, 2f \n"
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
@@ -55,7 +55,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
__asm__ __volatile__( \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" or $t0, %z4, $zero \n" \
" move $t0, %z4 \n" \
" " st " $t0, %1 \n" \
" beq $zero, $t0, 1b \n" \
"2: \n" \
@@ -82,7 +82,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
"# futex_atomic_cmpxchg_inatomic \n"
"1: ll.w %1, %3 \n"
" bne %1, %z4, 3f \n"
" or $t0, %z5, $zero \n"
" move $t0, %z5 \n"
"2: sc.w $t0, %2 \n"
" beq $zero, $t0, 1b \n"
"3: \n"
@@ -162,7 +162,7 @@ do { \
"2: \n" \
" .section .fixup,\"ax\" \n" \
"3: li.w %0, %3 \n" \
" or %1, $zero, $zero \n" \
" move %1, $zero \n" \
" b 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
@@ -50,7 +50,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point
/* KSave3 used for percpu base, initialized as 0 */
csrwr zero, PERCPU_BASE_KS
/* GPR21 used for percpu base (runtime), initialized as 0 */
-or u0, zero, zero
+move u0, zero
la tp, init_thread_union
/* Set the SP after an empty pt_regs. */