Commit 5bece3d6 authored by Daniel Axtens, committed by Michael Ellerman

powerpc: support KASAN instrumentation of bitops

The powerpc-specific bitops are not being picked up by the KASAN
test suite.

Instrumentation is done via the bitops/instrumented-{atomic,lock}.h
headers. They require that the arch-specific versions of the bitop
functions be renamed to arch_*. Do this renaming.
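
As background, the generic wrappers follow this pattern (a simplified
sketch of include/asm-generic/bitops/instrumented-atomic.h as of this
series, with kasan_check_write() from <linux/kasan-checks.h>): the
wrapper owns the generic name, checks the word the operation will
touch, then calls down to the renamed arch hook.

  static inline void set_bit(long nr, volatile unsigned long *addr)
  {
          /* tell KASAN which word the atomic op will write */
          kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
          arch_set_bit(nr, addr);
  }

Without the arch_ prefix, the powerpc implementation and this wrapper
would both claim the name set_bit().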

For clear_bit_unlock_is_negative_byte, the current implementation
uses the PG_waiters constant. This works because it's a preprocessor
macro, so it's only actually evaluated in contexts where PG_waiters
is defined. With instrumentation, however, it becomes a static inline
function, and suddenly we need the actual value of PG_waiters.
Because of the order of header includes, it's not available and we
fail to compile. Instead, manually specify that we care about bit 7.
This is still correct: bit 7 is the bit that would mark a negative
byte.
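
As a sanity check on that claim, bit 7 is the sign bit of an 8-bit
byte, so any byte with it set reads as negative when interpreted as
signed. A stand-alone illustration (not part of the patch):

  #include <assert.h>

  int main(void)
  {
          unsigned long word = 1UL << 7;  /* BIT_MASK(7) == 0x80 */
          signed char byte = (signed char)(word & 0xff);

          assert(byte < 0);  /* 0x80 as a signed char is -128 */
          return 0;
  }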

While we're at it, replace __inline__ with inline across the file.
Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Daniel Axtens <dja@axtens.net>
Tested-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820024941.12640-2-dja@axtens.net
parent 81d2c6f8
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -64,7 +64,7 @@
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix)		\
-static __inline__ void fn(unsigned long mask,	\
+static inline void fn(unsigned long mask,	\
 		volatile unsigned long *_p)	\
 {						\
 	unsigned long old;			\
@@ -86,22 +86,22 @@ DEFINE_BITOP(clear_bits, andc, "")
 DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
 DEFINE_BITOP(change_bits, xor, "")
 
-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
 {
 	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
 {
 	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
-static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit_unlock(int nr, volatile unsigned long *addr)
 {
 	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
 {
 	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
@@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
 /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
  * operands. */
 #define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
-static __inline__ unsigned long fn(			\
+static inline unsigned long fn(			\
 		unsigned long mask,			\
 		volatile unsigned long *_p)		\
 {							\
@@ -138,34 +138,34 @@ DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
 DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
 	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
-static __inline__ int test_and_set_bit(unsigned long nr,
-				       volatile unsigned long *addr)
+static inline int arch_test_and_set_bit(unsigned long nr,
+					volatile unsigned long *addr)
 {
 	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_set_bit_lock(unsigned long nr,
-					    volatile unsigned long *addr)
+static inline int arch_test_and_set_bit_lock(unsigned long nr,
+					     volatile unsigned long *addr)
 {
 	return test_and_set_bits_lock(BIT_MASK(nr),
 				addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_clear_bit(unsigned long nr,
-					 volatile unsigned long *addr)
+static inline int arch_test_and_clear_bit(unsigned long nr,
+					  volatile unsigned long *addr)
 {
 	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
-static __inline__ int test_and_change_bit(unsigned long nr,
-					  volatile unsigned long *addr)
+static inline int arch_test_and_change_bit(unsigned long nr,
+					   volatile unsigned long *addr)
 {
 	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
-						volatile unsigned long *addr)
+static inline unsigned long
+clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
 {
 	unsigned long old, t;
 	unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
@@ -185,15 +185,18 @@ static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
 	return old;
 }
 
-/* This is a special function for mm/filemap.c */
-#define clear_bit_unlock_is_negative_byte(nr, addr)			\
-	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters))
+/*
+ * This is a special function for mm/filemap.c
+ * Bit 7 corresponds to PG_waiters.
+ */
+#define arch_clear_bit_unlock_is_negative_byte(nr, addr)		\
+	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
 
 #endif /* CONFIG_PPC64 */
 
 #include <asm-generic/bitops/non-atomic.h>
 
-static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
 	__clear_bit(nr, addr);
@@ -215,14 +218,14 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
  * fls: find last (most-significant) bit set.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static __inline__ int fls(unsigned int x)
+static inline int fls(unsigned int x)
 {
 	return 32 - __builtin_clz(x);
 }
 
 #include <asm-generic/bitops/builtin-__fls.h>
 
-static __inline__ int fls64(__u64 x)
+static inline int fls64(__u64 x)
 {
 	return 64 - __builtin_clzll(x);
 }
@@ -239,6 +242,10 @@ unsigned long __arch_hweight64(__u64 w);
 #include <asm-generic/bitops/find.h>
 
+/* wrappers that deal with KASAN instrumentation */
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
 /* Little-endian versions */
 #include <asm-generic/bitops/le.h>
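
With the instrumented headers pulled in at the bottom of the file, the
un-prefixed names come from the generic wrappers. For example,
instrumented-lock.h provides roughly the following (a sketch of the
generic header of this era; see the header itself for the exact code):

  #if defined(arch_clear_bit_unlock_is_negative_byte)
  static inline bool
  clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
  {
          kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
          return arch_clear_bit_unlock_is_negative_byte(nr, addr);
  }
  #define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
  #endif

so callers such as mm/filemap.c keep using the generic name and now get
KASAN checking on powerpc as well.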