Commit 6da75397 authored by Richard Henderson's avatar Richard Henderson Committed by Matt Turner

alpha: Improve atomic_add_unless

Use ll/sc loops instead of C loops around cmpxchg.
Update the atomic64_add_unless block comment to match the code.
Reviewed-and-Tested-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
parent a5c6eae4
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 */
/*
 * __atomic_add_unless - add to @v unless it holds @u
 * @v: pointer of type atomic_t
 * @a: amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v (callers compare it against @u).
 *
 * Implemented as a native ldl_l/stl_c sequence instead of a C loop
 * around cmpxchg.  The stl_c-failed retry branch is placed in
 * .subsection 2 so the expected straight-line path is fall-through.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"		/* load-locked current value */
	"	cmpeq	%[old],%[u],%[c]\n"		/* c = (old == u) */
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"			/* old == u: skip the store */
	"	stl_c	%[new],%[mem]\n"		/* new := 0 if the store failed */
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"				/* lost the lock: retry */
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
@@ -207,21 +214,28 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
/*
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns true iff @v was not @u (boolean, unlike the 32-bit
 * variant above which returns the old value).
 *
 * Native ldq_l/stq_c sequence instead of a C loop around cmpxchg;
 * the stq_c-failed retry branch lives in .subsection 2 so the
 * common path is fall-through.  @tmp doubles as the add result and
 * the store-conditional status, which is fine because only @c is
 * needed after the asm.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"		/* load-locked current value */
	"	cmpeq	%[tmp],%[u],%[c]\n"		/* c = (old == u) */
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"			/* old == u: skip the store */
	"	stq_c	%[tmp],%[mem]\n"		/* tmp := 0 if the store failed */
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"				/* lost the lock: retry */
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment