Commit 1e6e57d9 authored by Will Deacon

arm64: percpu: rewrite ll/sc loops in assembly

Writing the outer loop of an LL/SC sequence using do {...} while
constructs potentially allows the compiler to hoist memory accesses
between the STXR and the branch back to the LDXR. On CPUs that do not
guarantee forward progress of LL/SC loops when faced with memory
accesses to the same ERG (up to 2k) between the failed STXR and the
branch back, we may end up livelocking.

This patch avoids this issue in our percpu atomics by rewriting the
outer loop as part of the LL/SC inline assembly block.

Cc: <stable@vger.kernel.org>
Fixes: f97fc810 ("arm64: percpu: Implement this_cpu operations")
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 1c5b51df
...@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \ ...@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
\ \
switch (size) { \ switch (size) { \
case 1: \ case 1: \
do { \
asm ("//__per_cpu_" #op "_1\n" \ asm ("//__per_cpu_" #op "_1\n" \
"ldxrb %w[ret], %[ptr]\n" \ "1: ldxrb %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \ #asm_op " %w[ret], %w[ret], %w[val]\n" \
"stxrb %w[loop], %w[ret], %[ptr]\n" \ " stxrb %w[loop], %w[ret], %[ptr]\n" \
" cbnz %w[loop], 1b" \
: [loop] "=&r" (loop), [ret] "=&r" (ret), \ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
[ptr] "+Q"(*(u8 *)ptr) \ [ptr] "+Q"(*(u8 *)ptr) \
: [val] "Ir" (val)); \ : [val] "Ir" (val)); \
} while (loop); \
break; \ break; \
case 2: \ case 2: \
do { \
asm ("//__per_cpu_" #op "_2\n" \ asm ("//__per_cpu_" #op "_2\n" \
"ldxrh %w[ret], %[ptr]\n" \ "1: ldxrh %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \ #asm_op " %w[ret], %w[ret], %w[val]\n" \
"stxrh %w[loop], %w[ret], %[ptr]\n" \ " stxrh %w[loop], %w[ret], %[ptr]\n" \
" cbnz %w[loop], 1b" \
: [loop] "=&r" (loop), [ret] "=&r" (ret), \ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
[ptr] "+Q"(*(u16 *)ptr) \ [ptr] "+Q"(*(u16 *)ptr) \
: [val] "Ir" (val)); \ : [val] "Ir" (val)); \
} while (loop); \
break; \ break; \
case 4: \ case 4: \
do { \
asm ("//__per_cpu_" #op "_4\n" \ asm ("//__per_cpu_" #op "_4\n" \
"ldxr %w[ret], %[ptr]\n" \ "1: ldxr %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \ #asm_op " %w[ret], %w[ret], %w[val]\n" \
"stxr %w[loop], %w[ret], %[ptr]\n" \ " stxr %w[loop], %w[ret], %[ptr]\n" \
" cbnz %w[loop], 1b" \
: [loop] "=&r" (loop), [ret] "=&r" (ret), \ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
[ptr] "+Q"(*(u32 *)ptr) \ [ptr] "+Q"(*(u32 *)ptr) \
: [val] "Ir" (val)); \ : [val] "Ir" (val)); \
} while (loop); \
break; \ break; \
case 8: \ case 8: \
do { \
asm ("//__per_cpu_" #op "_8\n" \ asm ("//__per_cpu_" #op "_8\n" \
"ldxr %[ret], %[ptr]\n" \ "1: ldxr %[ret], %[ptr]\n" \
#asm_op " %[ret], %[ret], %[val]\n" \ #asm_op " %[ret], %[ret], %[val]\n" \
"stxr %w[loop], %[ret], %[ptr]\n" \ " stxr %w[loop], %[ret], %[ptr]\n" \
" cbnz %w[loop], 1b" \
: [loop] "=&r" (loop), [ret] "=&r" (ret), \ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
[ptr] "+Q"(*(u64 *)ptr) \ [ptr] "+Q"(*(u64 *)ptr) \
: [val] "Ir" (val)); \ : [val] "Ir" (val)); \
} while (loop); \
break; \ break; \
default: \ default: \
BUILD_BUG(); \ BUILD_BUG(); \
...@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, ...@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
switch (size) { switch (size) {
case 1: case 1:
do {
asm ("//__percpu_xchg_1\n" asm ("//__percpu_xchg_1\n"
"ldxrb %w[ret], %[ptr]\n" "1: ldxrb %w[ret], %[ptr]\n"
"stxrb %w[loop], %w[val], %[ptr]\n" " stxrb %w[loop], %w[val], %[ptr]\n"
" cbnz %w[loop], 1b"
: [loop] "=&r"(loop), [ret] "=&r"(ret), : [loop] "=&r"(loop), [ret] "=&r"(ret),
[ptr] "+Q"(*(u8 *)ptr) [ptr] "+Q"(*(u8 *)ptr)
: [val] "r" (val)); : [val] "r" (val));
} while (loop);
break; break;
case 2: case 2:
do {
asm ("//__percpu_xchg_2\n" asm ("//__percpu_xchg_2\n"
"ldxrh %w[ret], %[ptr]\n" "1: ldxrh %w[ret], %[ptr]\n"
"stxrh %w[loop], %w[val], %[ptr]\n" " stxrh %w[loop], %w[val], %[ptr]\n"
" cbnz %w[loop], 1b"
: [loop] "=&r"(loop), [ret] "=&r"(ret), : [loop] "=&r"(loop), [ret] "=&r"(ret),
[ptr] "+Q"(*(u16 *)ptr) [ptr] "+Q"(*(u16 *)ptr)
: [val] "r" (val)); : [val] "r" (val));
} while (loop);
break; break;
case 4: case 4:
do {
asm ("//__percpu_xchg_4\n" asm ("//__percpu_xchg_4\n"
"ldxr %w[ret], %[ptr]\n" "1: ldxr %w[ret], %[ptr]\n"
"stxr %w[loop], %w[val], %[ptr]\n" " stxr %w[loop], %w[val], %[ptr]\n"
" cbnz %w[loop], 1b"
: [loop] "=&r"(loop), [ret] "=&r"(ret), : [loop] "=&r"(loop), [ret] "=&r"(ret),
[ptr] "+Q"(*(u32 *)ptr) [ptr] "+Q"(*(u32 *)ptr)
: [val] "r" (val)); : [val] "r" (val));
} while (loop);
break; break;
case 8: case 8:
do {
asm ("//__percpu_xchg_8\n" asm ("//__percpu_xchg_8\n"
"ldxr %[ret], %[ptr]\n" "1: ldxr %[ret], %[ptr]\n"
"stxr %w[loop], %[val], %[ptr]\n" " stxr %w[loop], %[val], %[ptr]\n"
" cbnz %w[loop], 1b"
: [loop] "=&r"(loop), [ret] "=&r"(ret), : [loop] "=&r"(loop), [ret] "=&r"(ret),
[ptr] "+Q"(*(u64 *)ptr) [ptr] "+Q"(*(u64 *)ptr)
: [val] "r" (val)); : [val] "r" (val));
} while (loop);
break; break;
default: default:
BUILD_BUG(); BUILD_BUG();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment