Commit 2245f924 authored by Victor Kamensky, committed by Ben Dooks

ARM: atomic64: fix endian-ness in atomic.h

Fix the inline asm for the atomic64_xxx functions in the ARM atomic.h.
Instead of the %H operand specifier, the code should use %Q for the
least significant part of the value and %R for the most significant
part. %H always returns the higher of the two register numbers and is
therefore not endian neutral; it should only be used with the ldrexd
and strexd instructions, which take the register pair in lower/higher
order regardless of endianness.
Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ben Dooks <ben.dooks@codethink.co.uk>
parent 5a8b93fc
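
A minimal sketch of the same pattern outside the kernel, assuming a 32-bit
ARM target built with GCC or Clang; the add64 name and its variables are
illustrative, not kernel API. %Q picks the register holding the least
significant 32 bits of a 64-bit operand and %R the most significant 32 bits
on either endianness, while %H only picks the higher-numbered register of
the pair, which is why it remains correct only in the ldrexd/strexd
register lists below.

#include <stdint.h>

/* Hypothetical example mirroring the patched atomic64_add; not kernel code. */
static inline void add64(uint64_t *p, uint64_t i)
{
	uint64_t result;
	unsigned long tmp;

	__asm__ __volatile__("@ add64\n"
"1:	ldrexd	%0, %H0, [%3]\n"	/* register pair in lower/higher order, as the instruction requires */
"	adds	%Q0, %Q0, %Q4\n"	/* %Q = low 32 bits on either endianness */
"	adc	%R0, %R0, %R4\n"	/* %R = high 32 bits on either endianness */
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (*p)
	: "r" (p), "r" (i)
	: "cc");
}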
@@ -301,8 +301,8 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_add\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -320,8 +320,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -341,8 +341,8 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_sub\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -360,8 +360,8 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -428,9 +428,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, #1\n"
-"	sbc	%H0, %H0, #0\n"
-"	teq	%H0, #0\n"
+"	subs	%Q0, %Q0, #1\n"
+"	sbc	%R0, %R0, #0\n"
+"	teq	%R0, #0\n"
 "	bmi	2f\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -459,8 +459,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 "	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %6\n"
-"	adc	%H0, %H0, %H6\n"
+"	adds	%Q0, %Q0, %Q6\n"
+"	adc	%R0, %R0, %R6\n"
 "	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"