Commit 022ccef6 authored by Will Deacon, committed by Greg Kroah-Hartman

arm64: futex: Bound number of LDXR/STXR loops in FUTEX_WAKE_OP

commit 03110a5c upstream.

Our futex implementation makes use of LDXR/STXR loops to perform atomic
updates to user memory from atomic context. This can lead to latency
problems if we end up spinning around the LL/SC sequence at the expense
of doing something useful.

Rework our futex atomic operations so that we return -EAGAIN if we fail
to update the futex word after 128 attempts. The core futex code will
reschedule if necessary and we'll try again later.
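
To see the shape of the change without the inline asm, here is a minimal
user-space sketch (mine, not part of this commit; bounded_futex_add() is a
made-up name) of the same policy: bound the atomic retry loop, and let the
caller yield and try again on -EAGAIN, much as the core futex code does
with cond_resched().

	#include <errno.h>
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FUTEX_MAX_LOOPS 128

	/* Analogue of the patched LL/SC loop: a weak CAS can fail under
	 * contention much as STXR can lose the exclusive monitor, so give
	 * up with -EAGAIN after FUTEX_MAX_LOOPS failures instead of
	 * spinning indefinitely. */
	static int bounded_futex_add(_Atomic uint32_t *uaddr, uint32_t oparg,
				     uint32_t *oldval)
	{
		unsigned int loops = FUTEX_MAX_LOOPS;
		uint32_t old = atomic_load_explicit(uaddr, memory_order_relaxed);

		do {
			if (atomic_compare_exchange_weak(uaddr, &old, old + oparg)) {
				*oldval = old;	/* report the value we replaced */
				return 0;
			}
		} while (--loops);

		return -EAGAIN;	/* caller may reschedule and retry */
	}

	int main(void)
	{
		_Atomic uint32_t futex_word = 40;
		uint32_t old;
		int ret;

		/* Caller-side policy: on -EAGAIN, yield the CPU (the kernel
		 * uses cond_resched()) and retry the bounded update. */
		while ((ret = bounded_futex_add(&futex_word, 2, &old)) == -EAGAIN)
			sched_yield();

		printf("ret=%d old=%u new=%u\n", ret, old,
		       (unsigned int)atomic_load(&futex_word));
		return 0;
	}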

Cc: <stable@kernel.org>
Fixes: 6170a974 ("arm64: Atomic operations")
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 325d0a18
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -23,26 +23,34 @@
 
 #include <asm/errno.h>
 
+#define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */
+
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
 do {									\
+	unsigned int loops = FUTEX_MAX_LOOPS;				\
+									\
 	uaccess_enable();						\
 	asm volatile(							\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w0, %w3, %2\n"						\
-"	cbnz	%w0, 1b\n"						\
-"	dmb	ish\n"							\
+"	cbz	%w0, 3f\n"						\
+"	sub	%w4, %w4, %w0\n"					\
+"	cbnz	%w4, 1b\n"						\
+"	mov	%w0, %w7\n"						\
 "3:\n"									\
+"	dmb	ish\n"							\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
-"4:	mov	%w0, %w5\n"						\
+"4:	mov	%w0, %w6\n"						\
 "	b	3b\n"							\
 "	.popsection\n"							\
 	_ASM_EXTABLE(1b, 4b)						\
 	_ASM_EXTABLE(2b, 4b)						\
-	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
-	: "r" (oparg), "Ir" (-EFAULT)					\
+	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),	\
+	  "+r" (loops)							\
+	: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN)			\
 	: "memory");							\
 	uaccess_disable();						\
 } while (0)
@@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%w3, %w4",
+		__futex_atomic_op("mov	%w3, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%w3, %w1, %w4",
+		__futex_atomic_op("add	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%w3, %w1, %w4",
+		__futex_atomic_op("orr	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%w3, %w1, %w4",
+		__futex_atomic_op("and	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%w3, %w1, %w4",
+		__futex_atomic_op("eor	%w3, %w1, %w5",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	default:
@@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 			      u32 oldval, u32 newval)
 {
 	int ret = 0;
+	unsigned int loops = FUTEX_MAX_LOOPS;
 	u32 val, tmp;
 	u32 __user *uaddr;
 
@@ -104,20 +113,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 	uaccess_enable();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
-"	sub	%w3, %w1, %w4\n"
-"	cbnz	%w3, 3f\n"
-"2:	stlxr	%w3, %w5, %2\n"
-"	cbnz	%w3, 1b\n"
-"	dmb	ish\n"
+"	sub	%w3, %w1, %w5\n"
+"	cbnz	%w3, 4f\n"
+"2:	stlxr	%w3, %w6, %2\n"
+"	cbz	%w3, 3f\n"
+"	sub	%w4, %w4, %w3\n"
+"	cbnz	%w4, 1b\n"
+"	mov	%w0, %w8\n"
 "3:\n"
+"	dmb	ish\n"
+"4:\n"
 "	.pushsection .fixup,\"ax\"\n"
-"4:	mov	%w0, %w6\n"
-"	b	3b\n"
+"5:	mov	%w0, %w7\n"
+"	b	4b\n"
 "	.popsection\n"
-	_ASM_EXTABLE(1b, 4b)
-	_ASM_EXTABLE(2b, 4b)
-	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
-	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
+	_ASM_EXTABLE(1b, 5b)
+	_ASM_EXTABLE(2b, 5b)
+	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+	: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
 	: "memory");
 	uaccess_disable();
...
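
(Annotation, as above: the cmpxchg variant gains the same loops operand,
shifting oldval, newval and -EFAULT up by one, and its labels are shuffled
so that a compare failure can still skip the barrier.)

	/*
	 * Operand/label map for the patched cmpxchg asm:
	 *   %w0  ret      (read/write)
	 *   %w1  val      (output)
	 *   %2   *uaddr   (memory, read/write)
	 *   %w3  tmp      (output)
	 *   %w4  loops    (read/write)        <- new
	 *   %w5  oldval   (input; was %w4)
	 *   %w6  newval   (input; was %w5)
	 *   %w7  -EFAULT  (input; was %w6)
	 *   %w8  -EAGAIN  (input)             <- new
	 *
	 * Labels: 3 = store succeeded or loops exhausted (runs the dmb ish);
	 *         4 = compare failed, skip the barrier (the old label 3);
	 *         5 = fault fixup (was 4).
	 */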