Commit cf3ee3c8 authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: add generic arch_*() bitops

Now that all architectures provide arch_atomic_long_*(), we can
implement the generic bitops atop these rather than atop
atomic_long_*(), giving arch_*() forms of the bitops that are safe to
use in noinstr code. The regular bitop wrappers are built atop these
arch_*() forms.

As the generic non-atomic bitops use plain accesses, these will be
implicitly instrumented unless they are inlined into noinstr functions
(which is similar to arch_atomic*_read() when based on READ_ONCE()).
The wrappers are modified so that where the underlying arch_*() function
uses a plain access, no explicit instrumentation is added, as this is
redundant and could result in confusing reports.
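
As a sketch (simplified, not the verbatim header), the resulting
non-atomic wrappers take the following shape, where instrument_write()
is the KASAN/KCSAN hook from <linux/instrumented.h> and the
*_uses_plain_access marker is defined next to each plain-access
arch_*() op:

/* Simplified sketch; see instrumented-non-atomic.h in the diff below. */
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
        /*
         * arch___set_bit() is a plain access that the sanitizers already
         * observe, so explicit instrumentation would be redundant.
         * __is_defined() folds to a constant 0/1 at compile time, so no
         * runtime branch remains.
         */
        if (!__is_defined(arch___set_bit_uses_plain_access))
                instrument_write(addr + BIT_WORD(nr), sizeof(long));
        arch___set_bit(nr, addr);
}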

Since function prototypes get excessively long with both an `arch_`
prefix and `__always_inline` attribute, the return type and function
attributes have been split onto a separate line, matching the style of
the generated atomic headers.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-6-mark.rutland@arm.com
parent 67d1b0de

--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,25 +11,29 @@
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
-        atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+        arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
-        atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+        arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
-        atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+        arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
@@ -38,11 +42,12 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
         if (READ_ONCE(*p) & mask)
                 return 1;
 
-        old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
         return !!(old & mask);
 }
 
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
@@ -51,18 +56,21 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
         if (!(READ_ONCE(*p) & mask))
                 return 0;
 
-        old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
         return !!(old & mask);
 }
 
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
 
         p += BIT_WORD(nr);
-        old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
         return !!(old & mask);
 }
 
+#include <asm-generic/bitops/instrumented-atomic.h>
+
 #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
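
With the arch_*() forms in place, noinstr code can call them directly
while regular code keeps the instrumented wrappers. A hypothetical
usage sketch (not part of this patch; noinstr comes from
<linux/compiler_types.h>):

/* Safe in noinstr: arch_set_bit() only calls other arch_*() ops. */
static noinstr void mark_entry_flag(volatile unsigned long *flags)
{
        arch_set_bit(0, flags);
}

/* Regular code: set_bit() adds instrument_atomic_write() first. */
static void mark_entry_flag_instrumented(volatile unsigned long *flags)
{
        set_bit(0, flags);
}
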
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -24,6 +24,7 @@
  */
 static inline void __set_bit(long nr, volatile unsigned long *addr)
 {
+        if (!__is_defined(arch___set_bit_uses_plain_access))
         instrument_write(addr + BIT_WORD(nr), sizeof(long));
         arch___set_bit(nr, addr);
 }
@@ -39,6 +40,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
  */
 static inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
+        if (!__is_defined(arch___clear_bit_uses_plain_access))
         instrument_write(addr + BIT_WORD(nr), sizeof(long));
         arch___clear_bit(nr, addr);
 }
@@ -54,6 +56,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline void __change_bit(long nr, volatile unsigned long *addr)
 {
+        if (!__is_defined(arch___change_bit_uses_plain_access))
         instrument_write(addr + BIT_WORD(nr), sizeof(long));
         arch___change_bit(nr, addr);
 }
@@ -92,6 +95,7 @@ static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
+        if (!__is_defined(arch___test_and_set_bit_uses_plain_access))
         __instrument_read_write_bitop(nr, addr);
         return arch___test_and_set_bit(nr, addr);
 }
@@ -106,6 +110,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
+        if (!__is_defined(arch___test_and_clear_bit_uses_plain_access))
         __instrument_read_write_bitop(nr, addr);
         return arch___test_and_clear_bit(nr, addr);
 }
@@ -120,6 +125,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
+        if (!__is_defined(arch___test_and_change_bit_uses_plain_access))
         __instrument_read_write_bitop(nr, addr);
         return arch___test_and_change_bit(nr, addr);
 }
@@ -131,6 +137,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_bit(long nr, const volatile unsigned long *addr)
 {
+        if (!__is_defined(arch_test_bit_uses_plain_access))
         instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_bit(nr, addr);
 }
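
For reference, __is_defined() comes from <linux/kconfig.h> and expands
to a constant-expression 0 or 1, so the checks added above cost nothing
at runtime; the compiler simply discards the dead instrument_*() call.
A minimal illustration with a hypothetical macro:

#include <linux/kconfig.h>

#define MY_FEATURE 1    /* defined the way kconfig options are */

/* __is_defined(MY_FEATURE) -> 1; an undefined macro -> 0. */
static int my_feature_enabled(void)
{
        return __is_defined(MY_FEATURE);
}
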
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,7 @@
 #include <asm/barrier.h>
 
 /**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -15,8 +15,8 @@
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-static inline int test_and_set_bit_lock(unsigned int nr,
-                                        volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
         if (READ_ONCE(*p) & mask)
                 return 1;
 
-        old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
         return !!(old & mask);
 }
 
 /**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
-        atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+        arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 /**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -54,19 +55,19 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
  *
  * See for example x86's implementation.
  */
-static inline void __clear_bit_unlock(unsigned int nr,
-                                      volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
         unsigned long old;
 
         p += BIT_WORD(nr);
         old = READ_ONCE(*p);
         old &= ~BIT_MASK(nr);
-        atomic_long_set_release((atomic_long_t *)p, old);
+        arch_atomic_long_set_release((atomic_long_t *)p, old);
 }
 
 /**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
  * byte is negative, for unlock.
  * @nr: the bit to clear
  * @addr: the address to start counting from
@@ -74,18 +75,20 @@ static inline void __clear_bit_unlock(unsigned int nr,
  * This is a bit of a one-trick-pony for the filemap code, which clears
  * PG_locked and tests PG_waiters,
  */
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
-                                                     volatile unsigned long *p)
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+                                                          volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
 
         p += BIT_WORD(nr);
-        old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
         return !!(old & BIT(7));
 }
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
 #endif
 
+#include <asm-generic/bitops/instrumented-lock.h>
+
 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
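
The lock variants give acquire semantics on test-and-set and release
semantics on clear, which is exactly the shape a bit spinlock needs. A
hypothetical sketch (real kernel code would use <linux/bit_spinlock.h>):

/* Spin until the bit transitions 0 -> 1 for us (acquire). */
static void my_bit_lock(unsigned int nr, volatile unsigned long *word)
{
        while (test_and_set_bit_lock(nr, word))
                cpu_relax();
}

/* Clear the bit with release semantics. */
static void my_bit_unlock(unsigned int nr, volatile unsigned long *word)
{
        clear_bit_unlock(nr, word);
}
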
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -5,7 +5,7 @@
 #include <asm/types.h>
 
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -13,24 +13,28 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(int nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
         *p |= mask;
 }
+#define arch___set_bit_uses_plain_access
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(int nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
         *p &= ~mask;
 }
+#define arch___clear_bit_uses_plain_access
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -38,16 +42,18 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline
+void arch___change_bit(int nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
         *p ^= mask;
 }
+#define arch___change_bit_uses_plain_access
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -55,7 +61,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_set_bit(int nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -64,9 +71,10 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
         *p = old | mask;
         return (old & mask) != 0;
 }
+#define arch___test_and_set_bit_uses_plain_access
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -74,7 +82,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -83,10 +92,11 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
         *p = old & ~mask;
         return (old & mask) != 0;
 }
+#define arch___test_and_clear_bit_uses_plain_access
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-                                        volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_change_bit(int nr, volatile unsigned long *addr)
 {
         unsigned long mask = BIT_MASK(nr);
         unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -95,15 +105,20 @@ static inline int __test_and_change_bit(int nr,
         *p = old ^ mask;
         return (old & mask) != 0;
 }
+#define arch___test_and_change_bit_uses_plain_access
 
 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int
+arch_test_bit(int nr, const volatile unsigned long *addr)
 {
         return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
+#define arch_test_bit_uses_plain_access
+
+#include <asm-generic/bitops/instrumented-non-atomic.h>
 
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
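
All of the ops above locate their bit the same way, via BIT_WORD() and
BIT_MASK() from <linux/bits.h> (shown here in simplified form):

#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

/*
 * Worked example on a 64-bit machine, nr = 70:
 *   BIT_WORD(70) = 1        -> second long in the array
 *   BIT_MASK(70) = 1UL << 6 -> bit 6 of that long
 * So arch_test_bit(70, addr) tests bit 6 of addr[1].
 */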