Commit 7fc1845d authored by Peter Zijlstra, committed by Thomas Gleixner

x86: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
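For callers the conversion is mechanical. A minimal before/after sketch (the
atomic_t variable and mask value are illustrative, not taken from this patch):

	atomic_t flags = ATOMIC_INIT(0);

	/* old, x86-only mask helpers */
	atomic_set_mask(0x04, &flags);		/* set bits */
	atomic_clear_mask(0x04, &flags);	/* clear bits */

	/* new, generic atomic logic ops */
	atomic_or(0x04, &flags);		/* set bits */
	atomic_and(~0x04, &flags);		/* clear bits */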
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ae8c35c8
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,6 +182,23 @@ static inline int atomic_xchg(atomic_t *v, int new)
 	return xchg(&v->counter, new);
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
+			: "+m" (v->counter)				\
+			: "ir" (i)					\
+			: "memory");					\
+}
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
@@ -219,15 +236,15 @@ static __always_inline short int atomic_inc_short(short int *v)
 	return *v;
 }
 
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
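For reference, ATOMIC_OP(or) above expands to the following (whitespace tidied):

static inline void atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

The lock-prefixed orl makes the read-modify-write of v->counter atomic; the
"+m" constraint marks the counter as both input and output, and "ir" allows
the source operand to be an immediate or a register.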
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
+#define ATOMIC64_OP(op, c_op)						\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	long long old, c = 0;						\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_32_H */
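32-bit x86 has no single instruction that applies a 64-bit logic op to memory,
so these variants are built as a cmpxchg loop. ATOMIC64_OP(or, |) expands to:

static inline void atomic64_or(long long i, atomic64_t *v)
{
	long long old, c = 0;

	/* retry until the value we computed against is still current */
	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}

The initial c = 0 is only a first guess; when the cmpxchg fails it returns the
actual value of the counter in old, and the next iteration recomputes c | i
against that.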
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
+#define ATOMIC64_OP(op)							\
+static inline void atomic64_##op(long i, atomic64_t *v)		\
+{									\
+	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
+			: "+m" (v->counter)				\
+			: "er" (i)					\
+			: "memory");					\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_64_H */
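The 64-bit variant mirrors the 32-bit one, using the q-suffixed instructions
and the "er" constraint (a 32-bit sign-extended immediate or a register, since
x86-64 has no 64-bit immediates for these ops). A minimal usage sketch (the
variable and bit position are illustrative):

	static atomic64_t events = ATOMIC64_INIT(0);

	atomic64_or(1UL << 12, &events);	/* set bit 12 */
	atomic64_xor(1UL << 12, &events);	/* toggle bit 12 */
	atomic64_and(~(1UL << 12), &events);	/* clear bit 12 */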