Commit 01cac82a authored by Ilya Leoshkevich, committed by Vasily Gorbik

s390/atomic: mark all functions __always_inline

Atomic functions are quite ubiquitous and may be called by noinstr
ones, introducing unwanted instrumentation. They are very small, so
there are no significant downsides to force-inlining them.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Link: https://lore.kernel.org/r/20240320230007.4782-2-iii@linux.ibm.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent e6ec07dc
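
As a rough illustration of the situation the commit message describes (a sketch with hypothetical names, not part of this patch): noinstr code must not call out-of-line, instrumentable functions. A helper marked only "static inline" is merely a hint, so the compiler may still emit it as an ordinary out-of-line function, and a call to it from a noinstr path would reintroduce instrumentation; __always_inline removes that possibility.

/*
 * Sketch only: demo_flags and demo_nmi_entry are hypothetical names used to
 * illustrate the problem; they are not part of this patch.
 */
#include <linux/compiler.h>	/* noinstr, __always_inline */
#include <linux/atomic.h>	/* arch_atomic_add() from asm/atomic.h */

static atomic_t demo_flags = ATOMIC_INIT(0);

/* noinstr code lives in .noinstr.text and must not call instrumentable functions. */
noinstr void demo_nmi_entry(void)
{
	/*
	 * noinstr paths use the arch_/raw_ atomics so that no explicit
	 * KASAN/KCSAN hooks are emitted.  But if arch_atomic_add() were only
	 * "static inline", the compiler could still generate it out of line
	 * as an ordinary, instrumentable function and call it from here.
	 * With __always_inline the underlying inline asm is expanded
	 * directly into this function, so nothing leaves the noinstr section.
	 */
	arch_atomic_add(1, &demo_flags);
}

The same reasoning applies to the __atomic_*() primitives in atomic_ops.h that back these wrappers, which is why the patch below covers both headers.
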
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,31 +15,31 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
 	return __atomic_read(v);
 }
 #define arch_atomic_read arch_atomic_read
 
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
 	__atomic_set(v, i);
 }
 #define arch_atomic_set arch_atomic_set
 
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter) + i;
 }
 #define arch_atomic_add_return arch_atomic_add_return
 
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter);
 }
 #define arch_atomic_fetch_add arch_atomic_fetch_add
 
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
 	__atomic_add(i, &v->counter);
 }
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
 #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
 
 #define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 	__atomic_##op(i, &v->counter); \
 } \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
 	return __atomic_##op##_barrier(i, &v->counter); \
 }
@@ -74,7 +74,7 @@ ATOMIC_OPS(xor)
 
 #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
@@ -82,31 +82,31 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	return __atomic64_read(v);
 }
 #define arch_atomic64_read arch_atomic64_read
 
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	__atomic64_set(v, i);
 }
 #define arch_atomic64_set arch_atomic64_set
 
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__atomic64_add(i, (long *)&v->counter);
 }
@@ -114,20 +114,20 @@ static inline void arch_atomic64_add(s64 i, atomic64_t *v)
 
 #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 #define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
 { \
 	__atomic64_##op(i, (long *)&v->counter); \
 } \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
 { \
 	return __atomic64_##op##_barrier(i, (long *)&v->counter); \
 }
 
 ATOMIC64_OPS(and)
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,7 +8,7 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__
 
-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
 {
 	int c;
 
@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
 	return c;
 }
 
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
 {
 	asm volatile(
 		" st %1,%0\n"
 		: "=R" (v->counter) : "d" (i));
 }
 
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
 {
 	s64 c;
 
@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
 {
 	asm volatile(
 		" stg %1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
 { \
 	op_type old; \
 	\
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
 #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
 { \
 	int old, new; \
 	\
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
 #undef __ATOMIC_OPS
 
 #define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
 { \
 	long old, new; \
 	\
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
 	asm volatile(
 		" cs %[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 	return old;
 }
 
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
 	int old_expected = old;
 
@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 	return old == old_expected;
 }
 
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
 	asm volatile(
 		" csg %[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 	return old;
 }
 
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
 	long old_expected = old;