Commit 9257959a authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: scripts: restructure fallback ifdeffery

Currently the various ordering variants of an atomic operation are
defined in groups of full/acquire/release/relaxed ordering variants with
some shared ifdeffery and several potential definitions of each ordering
variant in different branches of the shared ifdeffery.

As an ordering variant can have several potential definitions down
different branches of the shared ifdeffery, it can be painful for a
human to find a relevant definition, and we don't have a good location
to place anything common to all definitions of an ordering variant (e.g.
kerneldoc).

Historically the grouping of full/acquire/release/relaxed ordering
variants was necessary as we filled in the missing atomics in the same
namespace as the architecture used. It would be easy to accidentally
define one ordering fallback in terms of another ordering fallback with
redundant barriers, and avoiding that would otherwise require a lot of
baroque ifdeffery.

With recent changes we no longer need to fill in the missing atomics in
the arch_atomic*_<op>() namespace, and only need to fill in the
raw_atomic*_<op>() namespace. Due to this, there's no risk of a
namespace collision, and we can define each raw_atomic*_<op> ordering
variant with its own ifdeffery checking for the arch_atomic*_<op>
ordering variants.

Restructure the fallbacks in this way, with each ordering variant having
its own ifdeffery of the form:

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Note that where there's no relevant arch_atomic*_<op>() ordering
variant, we'll define the operation in terms of a distinct
raw_atomic*_<otherop>(), as this itself might have been filled in with a
fallback.
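
For instance, for an operation with no ordering variants the generated
ifdeffery collapses to a single #if/#else. A sketch (based on the
inc_not_zero fallback template updated later in this patch) of what the
generated raw_atomic_inc_not_zero() looks like:

| #if defined(arch_atomic_inc_not_zero)
| #define raw_atomic_inc_not_zero arch_atomic_inc_not_zero
| #else
| static __always_inline bool
| raw_atomic_inc_not_zero(atomic_t *v)
| {
| 	/* fall back to another raw_atomic op, which may itself be a fallback */
| 	return raw_atomic_add_unless(v, 1, 0);
| }
| #endif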

As we now generate the raw_atomic*_<op>() implementations directly, we
no longer need the trivial wrappers, so they are removed.

This makes the ifdeffery easier to follow, and will allow for further
improvements in subsequent patches.

There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
parent 1815da17
@@ -78,7 +78,6 @@
})
#include <linux/atomic/atomic-arch-fallback.h>
-#include <linux/atomic/atomic-raw.h>
#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-instrumented.h>
......
This source diff could not be displayed because it is too large.
// SPDX-License-Identifier: GPL-2.0
// Generated by scripts/atomic/gen-atomic-raw.sh
// DO NOT MODIFY THIS FILE DIRECTLY
#ifndef _LINUX_ATOMIC_RAW_H
#define _LINUX_ATOMIC_RAW_H
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
return arch_atomic_read(v);
}
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
return arch_atomic_read_acquire(v);
}
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
arch_atomic_set(v, i);
}
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
arch_atomic_set_release(v, i);
}
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
arch_atomic_add(i, v);
}
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
return arch_atomic_add_return(i, v);
}
static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
return arch_atomic_add_return_acquire(i, v);
}
static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
return arch_atomic_add_return_release(i, v);
}
static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
return arch_atomic_add_return_relaxed(i, v);
}
static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
return arch_atomic_fetch_add(i, v);
}
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_add_acquire(i, v);
}
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
return arch_atomic_fetch_add_release(i, v);
}
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_add_relaxed(i, v);
}
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
arch_atomic_sub(i, v);
}
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_sub_return(i, v);
}
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
return arch_atomic_sub_return_acquire(i, v);
}
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
return arch_atomic_sub_return_release(i, v);
}
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
return arch_atomic_sub_return_relaxed(i, v);
}
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
return arch_atomic_fetch_sub(i, v);
}
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_sub_acquire(i, v);
}
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
return arch_atomic_fetch_sub_release(i, v);
}
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_sub_relaxed(i, v);
}
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
arch_atomic_inc(v);
}
static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
return arch_atomic_inc_return(v);
}
static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
return arch_atomic_inc_return_acquire(v);
}
static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
return arch_atomic_inc_return_release(v);
}
static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
return arch_atomic_inc_return_relaxed(v);
}
static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
return arch_atomic_fetch_inc(v);
}
static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
return arch_atomic_fetch_inc_acquire(v);
}
static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
return arch_atomic_fetch_inc_release(v);
}
static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
return arch_atomic_fetch_inc_relaxed(v);
}
static __always_inline void
raw_atomic_dec(atomic_t *v)
{
arch_atomic_dec(v);
}
static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
return arch_atomic_dec_return(v);
}
static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
return arch_atomic_dec_return_acquire(v);
}
static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
return arch_atomic_dec_return_release(v);
}
static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
return arch_atomic_dec_return_relaxed(v);
}
static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
return arch_atomic_fetch_dec(v);
}
static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
return arch_atomic_fetch_dec_acquire(v);
}
static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
return arch_atomic_fetch_dec_release(v);
}
static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
return arch_atomic_fetch_dec_relaxed(v);
}
static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
arch_atomic_and(i, v);
}
static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
return arch_atomic_fetch_and(i, v);
}
static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_and_acquire(i, v);
}
static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
return arch_atomic_fetch_and_release(i, v);
}
static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_and_relaxed(i, v);
}
static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
arch_atomic_andnot(i, v);
}
static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
return arch_atomic_fetch_andnot(i, v);
}
static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_andnot_acquire(i, v);
}
static __always_inline int
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
return arch_atomic_fetch_andnot_release(i, v);
}
static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_andnot_relaxed(i, v);
}
static __always_inline void
raw_atomic_or(int i, atomic_t *v)
{
arch_atomic_or(i, v);
}
static __always_inline int
raw_atomic_fetch_or(int i, atomic_t *v)
{
return arch_atomic_fetch_or(i, v);
}
static __always_inline int
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_or_acquire(i, v);
}
static __always_inline int
raw_atomic_fetch_or_release(int i, atomic_t *v)
{
return arch_atomic_fetch_or_release(i, v);
}
static __always_inline int
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_or_relaxed(i, v);
}
static __always_inline void
raw_atomic_xor(int i, atomic_t *v)
{
arch_atomic_xor(i, v);
}
static __always_inline int
raw_atomic_fetch_xor(int i, atomic_t *v)
{
return arch_atomic_fetch_xor(i, v);
}
static __always_inline int
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
return arch_atomic_fetch_xor_acquire(i, v);
}
static __always_inline int
raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
return arch_atomic_fetch_xor_release(i, v);
}
static __always_inline int
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
return arch_atomic_fetch_xor_relaxed(i, v);
}
static __always_inline int
raw_atomic_xchg(atomic_t *v, int i)
{
return arch_atomic_xchg(v, i);
}
static __always_inline int
raw_atomic_xchg_acquire(atomic_t *v, int i)
{
return arch_atomic_xchg_acquire(v, i);
}
static __always_inline int
raw_atomic_xchg_release(atomic_t *v, int i)
{
return arch_atomic_xchg_release(v, i);
}
static __always_inline int
raw_atomic_xchg_relaxed(atomic_t *v, int i)
{
return arch_atomic_xchg_relaxed(v, i);
}
static __always_inline int
raw_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_atomic_cmpxchg(v, old, new);
}
static __always_inline int
raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
return arch_atomic_cmpxchg_acquire(v, old, new);
}
static __always_inline int
raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
return arch_atomic_cmpxchg_release(v, old, new);
}
static __always_inline int
raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return arch_atomic_try_cmpxchg(v, old, new);
}
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
return arch_atomic_try_cmpxchg_release(v, old, new);
}
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
static __always_inline bool
raw_atomic_sub_and_test(int i, atomic_t *v)
{
return arch_atomic_sub_and_test(i, v);
}
static __always_inline bool
raw_atomic_dec_and_test(atomic_t *v)
{
return arch_atomic_dec_and_test(v);
}
static __always_inline bool
raw_atomic_inc_and_test(atomic_t *v)
{
return arch_atomic_inc_and_test(v);
}
static __always_inline bool
raw_atomic_add_negative(int i, atomic_t *v)
{
return arch_atomic_add_negative(i, v);
}
static __always_inline bool
raw_atomic_add_negative_acquire(int i, atomic_t *v)
{
return arch_atomic_add_negative_acquire(i, v);
}
static __always_inline bool
raw_atomic_add_negative_release(int i, atomic_t *v)
{
return arch_atomic_add_negative_release(i, v);
}
static __always_inline bool
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
return arch_atomic_add_negative_relaxed(i, v);
}
static __always_inline int
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
return arch_atomic_fetch_add_unless(v, a, u);
}
static __always_inline bool
raw_atomic_add_unless(atomic_t *v, int a, int u)
{
return arch_atomic_add_unless(v, a, u);
}
static __always_inline bool
raw_atomic_inc_not_zero(atomic_t *v)
{
return arch_atomic_inc_not_zero(v);
}
static __always_inline bool
raw_atomic_inc_unless_negative(atomic_t *v)
{
return arch_atomic_inc_unless_negative(v);
}
static __always_inline bool
raw_atomic_dec_unless_positive(atomic_t *v)
{
return arch_atomic_dec_unless_positive(v);
}
static __always_inline int
raw_atomic_dec_if_positive(atomic_t *v)
{
return arch_atomic_dec_if_positive(v);
}
static __always_inline s64
raw_atomic64_read(const atomic64_t *v)
{
return arch_atomic64_read(v);
}
static __always_inline s64
raw_atomic64_read_acquire(const atomic64_t *v)
{
return arch_atomic64_read_acquire(v);
}
static __always_inline void
raw_atomic64_set(atomic64_t *v, s64 i)
{
arch_atomic64_set(v, i);
}
static __always_inline void
raw_atomic64_set_release(atomic64_t *v, s64 i)
{
arch_atomic64_set_release(v, i);
}
static __always_inline void
raw_atomic64_add(s64 i, atomic64_t *v)
{
arch_atomic64_add(i, v);
}
static __always_inline s64
raw_atomic64_add_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(i, v);
}
static __always_inline s64
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return_acquire(i, v);
}
static __always_inline s64
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return_release(i, v);
}
static __always_inline s64
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return_relaxed(i, v);
}
static __always_inline s64
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_add(i, v);
}
static __always_inline s64
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_add_acquire(i, v);
}
static __always_inline s64
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_add_release(i, v);
}
static __always_inline s64
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_add_relaxed(i, v);
}
static __always_inline void
raw_atomic64_sub(s64 i, atomic64_t *v)
{
arch_atomic64_sub(i, v);
}
static __always_inline s64
raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_sub_return(i, v);
}
static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_sub_return_acquire(i, v);
}
static __always_inline s64
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
return arch_atomic64_sub_return_release(i, v);
}
static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_sub_return_relaxed(i, v);
}
static __always_inline s64
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_sub(i, v);
}
static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_sub_acquire(i, v);
}
static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_sub_release(i, v);
}
static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_sub_relaxed(i, v);
}
static __always_inline void
raw_atomic64_inc(atomic64_t *v)
{
arch_atomic64_inc(v);
}
static __always_inline s64
raw_atomic64_inc_return(atomic64_t *v)
{
return arch_atomic64_inc_return(v);
}
static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t *v)
{
return arch_atomic64_inc_return_acquire(v);
}
static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t *v)
{
return arch_atomic64_inc_return_release(v);
}
static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
return arch_atomic64_inc_return_relaxed(v);
}
static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t *v)
{
return arch_atomic64_fetch_inc(v);
}
static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
return arch_atomic64_fetch_inc_acquire(v);
}
static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t *v)
{
return arch_atomic64_fetch_inc_release(v);
}
static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
return arch_atomic64_fetch_inc_relaxed(v);
}
static __always_inline void
raw_atomic64_dec(atomic64_t *v)
{
arch_atomic64_dec(v);
}
static __always_inline s64
raw_atomic64_dec_return(atomic64_t *v)
{
return arch_atomic64_dec_return(v);
}
static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t *v)
{
return arch_atomic64_dec_return_acquire(v);
}
static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t *v)
{
return arch_atomic64_dec_return_release(v);
}
static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
return arch_atomic64_dec_return_relaxed(v);
}
static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t *v)
{
return arch_atomic64_fetch_dec(v);
}
static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
return arch_atomic64_fetch_dec_acquire(v);
}
static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t *v)
{
return arch_atomic64_fetch_dec_release(v);
}
static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
return arch_atomic64_fetch_dec_relaxed(v);
}
static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
arch_atomic64_and(i, v);
}
static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and(i, v);
}
static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and_acquire(i, v);
}
static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and_release(i, v);
}
static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_and_relaxed(i, v);
}
static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
arch_atomic64_andnot(i, v);
}
static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_andnot(i, v);
}
static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_andnot_acquire(i, v);
}
static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_andnot_release(i, v);
}
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
arch_atomic64_or(i, v);
}
static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_or(i, v);
}
static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_or_acquire(i, v);
}
static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_or_release(i, v);
}
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_or_relaxed(i, v);
}
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
arch_atomic64_xor(i, v);
}
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_xor(i, v);
}
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_xor_acquire(i, v);
}
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_xor_release(i, v);
}
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_fetch_xor_relaxed(i, v);
}
static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 i)
{
return arch_atomic64_xchg(v, i);
}
static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
return arch_atomic64_xchg_acquire(v, i);
}
static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 i)
{
return arch_atomic64_xchg_release(v, i);
}
static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
return arch_atomic64_xchg_relaxed(v, i);
}
static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_atomic64_cmpxchg(v, old, new);
}
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
return arch_atomic64_cmpxchg_release(v, old, new);
}
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return arch_atomic64_try_cmpxchg(v, old, new);
}
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return arch_atomic64_sub_and_test(i, v);
}
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
return arch_atomic64_dec_and_test(v);
}
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
return arch_atomic64_inc_and_test(v);
}
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
return arch_atomic64_add_negative(i, v);
}
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
return arch_atomic64_add_negative_acquire(i, v);
}
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
return arch_atomic64_add_negative_release(i, v);
}
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
return arch_atomic64_add_negative_relaxed(i, v);
}
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
return arch_atomic64_fetch_add_unless(v, a, u);
}
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
return arch_atomic64_add_unless(v, a, u);
}
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
return arch_atomic64_inc_not_zero(v);
}
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
return arch_atomic64_inc_unless_negative(v);
}
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
return arch_atomic64_dec_unless_positive(v);
}
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
return arch_atomic64_dec_if_positive(v);
}
#define raw_xchg(...) \
arch_xchg(__VA_ARGS__)
#define raw_xchg_acquire(...) \
arch_xchg_acquire(__VA_ARGS__)
#define raw_xchg_release(...) \
arch_xchg_release(__VA_ARGS__)
#define raw_xchg_relaxed(...) \
arch_xchg_relaxed(__VA_ARGS__)
#define raw_cmpxchg(...) \
arch_cmpxchg(__VA_ARGS__)
#define raw_cmpxchg_acquire(...) \
arch_cmpxchg_acquire(__VA_ARGS__)
#define raw_cmpxchg_release(...) \
arch_cmpxchg_release(__VA_ARGS__)
#define raw_cmpxchg_relaxed(...) \
arch_cmpxchg_relaxed(__VA_ARGS__)
#define raw_cmpxchg64(...) \
arch_cmpxchg64(__VA_ARGS__)
#define raw_cmpxchg64_acquire(...) \
arch_cmpxchg64_acquire(__VA_ARGS__)
#define raw_cmpxchg64_release(...) \
arch_cmpxchg64_release(__VA_ARGS__)
#define raw_cmpxchg64_relaxed(...) \
arch_cmpxchg64_relaxed(__VA_ARGS__)
#define raw_cmpxchg128(...) \
arch_cmpxchg128(__VA_ARGS__)
#define raw_cmpxchg128_acquire(...) \
arch_cmpxchg128_acquire(__VA_ARGS__)
#define raw_cmpxchg128_release(...) \
arch_cmpxchg128_release(__VA_ARGS__)
#define raw_cmpxchg128_relaxed(...) \
arch_cmpxchg128_relaxed(__VA_ARGS__)
#define raw_try_cmpxchg(...) \
arch_try_cmpxchg(__VA_ARGS__)
#define raw_try_cmpxchg_acquire(...) \
arch_try_cmpxchg_acquire(__VA_ARGS__)
#define raw_try_cmpxchg_release(...) \
arch_try_cmpxchg_release(__VA_ARGS__)
#define raw_try_cmpxchg_relaxed(...) \
arch_try_cmpxchg_relaxed(__VA_ARGS__)
#define raw_try_cmpxchg64(...) \
arch_try_cmpxchg64(__VA_ARGS__)
#define raw_try_cmpxchg64_acquire(...) \
arch_try_cmpxchg64_acquire(__VA_ARGS__)
#define raw_try_cmpxchg64_release(...) \
arch_try_cmpxchg64_release(__VA_ARGS__)
#define raw_try_cmpxchg64_relaxed(...) \
arch_try_cmpxchg64_relaxed(__VA_ARGS__)
#define raw_try_cmpxchg128(...) \
arch_try_cmpxchg128(__VA_ARGS__)
#define raw_try_cmpxchg128_acquire(...) \
arch_try_cmpxchg128_acquire(__VA_ARGS__)
#define raw_try_cmpxchg128_release(...) \
arch_try_cmpxchg128_release(__VA_ARGS__)
#define raw_try_cmpxchg128_relaxed(...) \
arch_try_cmpxchg128_relaxed(__VA_ARGS__)
#define raw_cmpxchg_local(...) \
arch_cmpxchg_local(__VA_ARGS__)
#define raw_cmpxchg64_local(...) \
arch_cmpxchg64_local(__VA_ARGS__)
#define raw_cmpxchg128_local(...) \
arch_cmpxchg128_local(__VA_ARGS__)
#define raw_sync_cmpxchg(...) \
arch_sync_cmpxchg(__VA_ARGS__)
#define raw_try_cmpxchg_local(...) \
arch_try_cmpxchg_local(__VA_ARGS__)
#define raw_try_cmpxchg64_local(...) \
arch_try_cmpxchg64_local(__VA_ARGS__)
#define raw_try_cmpxchg128_local(...) \
arch_try_cmpxchg128_local(__VA_ARGS__)
#endif /* _LINUX_ATOMIC_RAW_H */
// b23ed4424e85200e200ded094522e1d743b3a5b1
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
raw_${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
......
cat <<EOF
static __always_inline bool
arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
raw_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
{
return arch_${atomic}_add_return${order}(i, v) < 0;
return raw_${atomic}_add_return${order}(i, v) < 0;
}
EOF
cat << EOF
static __always_inline bool
arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
return arch_${atomic}_fetch_add_unless(v, a, u) != u;
return raw_${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
raw_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF
cat <<EOF
static __always_inline ${int}
arch_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
raw_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
{
return arch_cmpxchg${order}(&v->counter, old, new);
return raw_cmpxchg${order}(&v->counter, old, new);
}
EOF
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
raw_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF
cat <<EOF
static __always_inline bool
arch_${atomic}_dec_and_test(${atomic}_t *v)
raw_${atomic}_dec_and_test(${atomic}_t *v)
{
return arch_${atomic}_dec_return(v) == 0;
return raw_${atomic}_dec_return(v) == 0;
}
EOF
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_dec_if_positive(${atomic}_t *v)
raw_${atomic}_dec_if_positive(${atomic}_t *v)
{
${int} dec, c = arch_${atomic}_read(v);
${int} dec, c = raw_${atomic}_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
} while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));
return dec;
}
......
cat <<EOF
static __always_inline bool
arch_${atomic}_dec_unless_positive(${atomic}_t *v)
raw_${atomic}_dec_unless_positive(${atomic}_t *v)
{
${int} c = arch_${atomic}_read(v);
${int} c = raw_${atomic}_read(v);
do {
if (unlikely(c > 0))
return false;
} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
} while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));
return true;
}
......
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}${name}${sfx}(${params})
raw_${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
__atomic_pre_full_fence();
......
cat << EOF
static __always_inline ${int}
arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
raw_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
${int} c = arch_${atomic}_read(v);
${int} c = raw_${atomic}_read(v);
do {
if (unlikely(c == u))
break;
} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
} while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));
return c;
}
......
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
raw_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF
cat <<EOF
static __always_inline bool
arch_${atomic}_inc_and_test(${atomic}_t *v)
raw_${atomic}_inc_and_test(${atomic}_t *v)
{
return arch_${atomic}_inc_return(v) == 0;
return raw_${atomic}_inc_return(v) == 0;
}
EOF
cat <<EOF
static __always_inline bool
arch_${atomic}_inc_not_zero(${atomic}_t *v)
raw_${atomic}_inc_not_zero(${atomic}_t *v)
{
return arch_${atomic}_add_unless(v, 1, 0);
return raw_${atomic}_add_unless(v, 1, 0);
}
EOF
cat <<EOF
static __always_inline bool
arch_${atomic}_inc_unless_negative(${atomic}_t *v)
raw_${atomic}_inc_unless_negative(${atomic}_t *v)
{
${int} c = arch_${atomic}_read(v);
${int} c = raw_${atomic}_read(v);
do {
if (unlikely(c < 0))
return false;
} while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
} while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));
return true;
}
......
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_read_acquire(const ${atomic}_t *v)
raw_${atomic}_read_acquire(const ${atomic}_t *v)
{
${int} ret;
if (__native_word(${atomic}_t)) {
ret = smp_load_acquire(&(v)->counter);
} else {
ret = arch_${atomic}_read(v);
ret = raw_${atomic}_read(v);
__atomic_acquire_fence();
}
......
cat <<EOF
static __always_inline ${ret}
arch_${atomic}_${pfx}${name}${sfx}_release(${params})
raw_${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
......
cat <<EOF
static __always_inline void
arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
raw_${atomic}_set_release(${atomic}_t *v, ${int} i)
{
if (__native_word(${atomic}_t)) {
smp_store_release(&(v)->counter, i);
} else {
__atomic_release_fence();
arch_${atomic}_set(v, i);
raw_${atomic}_set(v, i);
}
}
EOF
cat <<EOF
static __always_inline bool
arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
raw_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
return arch_${atomic}_sub_return(i, v) == 0;
return raw_${atomic}_sub_return(i, v) == 0;
}
EOF
cat <<EOF
static __always_inline bool
arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
raw_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
r = arch_${atomic}_cmpxchg${order}(v, o, new);
r = raw_${atomic}_cmpxchg${order}(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
......
cat <<EOF
static __always_inline ${int}
arch_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
raw_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
{
return arch_xchg${order}(&v->counter, new);
return raw_xchg${order}(&v->counter, new);
}
EOF
@@ -17,19 +17,12 @@ gen_template_fallback()
local atomic="$1"; shift
local int="$1"; shift
local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local args="$(gen_args "$@")"
if [ ! -z "${template}" ]; then
printf "#ifndef ${atomicname}\n"
. ${template}
printf "#define ${atomicname} ${atomicname}\n"
printf "#endif\n\n"
fi
. ${template}
}
#gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
@@ -59,69 +52,92 @@ gen_proto_fallback()
gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
}
#gen_basic_fallbacks(basename)
gen_basic_fallbacks()
{
local basename="$1"; shift
cat << EOF
#define ${basename}_acquire ${basename}
#define ${basename}_release ${basename}
#define ${basename}_relaxed ${basename}
EOF
}
#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
gen_proto_order_variants()
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, args...)
gen_proto_order_variant()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local atomic="$1"
local basename="arch_${atomic}_${pfx}${name}${sfx}"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "")"
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local basename="${atomic}_${pfx}${name}${sfx}"
# If we don't have relaxed atomics, then we don't bother with ordering fallbacks
# read_acquire and set_release need to be templated, though
if ! meta_has_relaxed "${meta}"; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
if meta_has_acquire "${meta}"; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
fi
# Where there is no possible fallback, this order variant is mandatory
# and must be provided by arch code. Add a comment to the header to
# make this obvious.
#
# Ideally we'd error on a missing definition, but arch code might
# define this order variant as a C function without a preprocessor
# symbol.
if [ -z ${template} ] && [ -z "${order}" ] && ! meta_has_relaxed "${meta}"; then
printf "#define raw_${atomicname} arch_${atomicname}\n\n"
return
fi
if meta_has_release "${meta}"; then
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
fi
printf "#if defined(arch_${atomicname})\n"
printf "#define raw_${atomicname} arch_${atomicname}\n"
return
# Allow FULL/ACQUIRE/RELEASE ops to be defined in terms of RELAXED ops
if [ "${order}" != "_relaxed" ] && meta_has_relaxed "${meta}"; then
printf "#elif defined(arch_${basename}_relaxed)\n"
gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
fi
printf "#ifndef ${basename}_relaxed\n"
# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
if [ ! -z "${order}" ]; then
printf "#elif defined(arch_${basename})\n"
printf "#define raw_${atomicname} arch_${basename}\n"
fi
printf "#else\n"
if [ ! -z "${template}" ]; then
printf "#ifdef ${basename}\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
else
printf "#error \"Unable to define raw_${atomicname}\"\n"
fi
gen_basic_fallbacks "${basename}"
printf "#endif\n\n"
}
if [ ! -z "${template}" ]; then
printf "#endif /* ${basename} */\n\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
gen_proto_order_variants()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local atomic="$1"
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
if meta_has_acquire "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
fi
printf "#else /* ${basename}_relaxed */\n\n"
if meta_has_release "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
fi
gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
if meta_has_relaxed "${meta}"; then
gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
fi
}
printf "#endif /* ${basename}_relaxed */\n\n"
#gen_basic_fallbacks(basename)
gen_basic_fallbacks()
{
local basename="$1"; shift
cat << EOF
#define raw_${basename}_acquire arch_${basename}
#define raw_${basename}_release arch_${basename}
#define raw_${basename}_relaxed arch_${basename}
EOF
}
gen_order_fallbacks()
@@ -130,36 +146,65 @@ gen_order_fallbacks()
cat <<EOF
#ifndef ${xchg}_acquire
#define ${xchg}_acquire(...) \\
__atomic_op_acquire(${xchg}, __VA_ARGS__)
#define raw_${xchg}_relaxed arch_${xchg}_relaxed
#ifdef arch_${xchg}_acquire
#define raw_${xchg}_acquire arch_${xchg}_acquire
#else
#define raw_${xchg}_acquire(...) \\
__atomic_op_acquire(arch_${xchg}, __VA_ARGS__)
#endif
#ifndef ${xchg}_release
#define ${xchg}_release(...) \\
__atomic_op_release(${xchg}, __VA_ARGS__)
#ifdef arch_${xchg}_release
#define raw_${xchg}_release arch_${xchg}_release
#else
#define raw_${xchg}_release(...) \\
__atomic_op_release(arch_${xchg}, __VA_ARGS__)
#endif
#ifndef ${xchg}
#define ${xchg}(...) \\
__atomic_op_fence(${xchg}, __VA_ARGS__)
#ifdef arch_${xchg}
#define raw_${xchg} arch_${xchg}
#else
#define raw_${xchg}(...) \\
__atomic_op_fence(arch_${xchg}, __VA_ARGS__)
#endif
EOF
}
gen_xchg_fallbacks()
gen_xchg_order_fallback()
{
local xchg="$1"; shift
printf "#ifndef ${xchg}_relaxed\n"
local order="$1"; shift
local forder="${order:-_fence}"
gen_basic_fallbacks ${xchg}
printf "#if defined(arch_${xchg}${order})\n"
printf "#define raw_${xchg}${order} arch_${xchg}${order}\n"
printf "#else /* ${xchg}_relaxed */\n"
if [ "${order}" != "_relaxed" ]; then
printf "#elif defined(arch_${xchg}_relaxed)\n"
printf "#define raw_${xchg}${order}(...) \\\\\n"
printf " __atomic_op${forder}(arch_${xchg}, __VA_ARGS__)\n"
fi
gen_order_fallbacks ${xchg}
if [ ! -z "${order}" ]; then
printf "#elif defined(arch_${xchg})\n"
printf "#define raw_${xchg}${order} arch_${xchg}\n"
fi
printf "#endif /* ${xchg}_relaxed */\n\n"
printf "#else\n"
printf "extern void raw_${xchg}${order}_not_implemented(void);\n"
printf "#define raw_${xchg}${order}(...) raw_${xchg}${order}_not_implemented()\n"
printf "#endif\n\n"
}
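
For illustration, a sketch of the block gen_xchg_order_fallback() emits for
xchg="xchg" and order="_acquire" (here forder expands to "_acquire", and
__atomic_op_acquire() wraps the relaxed op with __atomic_acquire_fence()):

| #if defined(arch_xchg_acquire)
| #define raw_xchg_acquire arch_xchg_acquire
| #elif defined(arch_xchg_relaxed)
| #define raw_xchg_acquire(...) \
| 	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
| #elif defined(arch_xchg)
| #define raw_xchg_acquire arch_xchg
| #else
| extern void raw_xchg_acquire_not_implemented(void);
| #define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
| #endif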
gen_xchg_fallbacks()
{
local xchg="$1"; shift
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg_order_fallback "${xchg}" "${order}"
done
}
gen_try_cmpxchg_fallback()
@@ -168,40 +213,61 @@ gen_try_cmpxchg_fallback()
local order="$1"; shift;
cat <<EOF
#ifndef arch_try_${cmpxchg}${order}
#define arch_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
#define raw_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
({ \\
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
___r = arch_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
___r = raw_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
if (unlikely(___r != ___o)) \\
*___op = ___r; \\
likely(___r == ___o); \\
})
#endif /* arch_try_${cmpxchg}${order} */
EOF
}
gen_try_cmpxchg_fallbacks()
gen_try_cmpxchg_order_fallback()
{
local cmpxchg="$1"; shift;
local cmpxchg="$1"; shift
local order="$1"; shift
local forder="${order:-_fence}"
printf "#ifndef arch_try_${cmpxchg}_relaxed\n"
printf "#ifdef arch_try_${cmpxchg}\n"
printf "#if defined(arch_try_${cmpxchg}${order})\n"
printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}${order}\n"
gen_basic_fallbacks "arch_try_${cmpxchg}"
if [ "${order}" != "_relaxed" ]; then
printf "#elif defined(arch_try_${cmpxchg}_relaxed)\n"
printf "#define raw_try_${cmpxchg}${order}(...) \\\\\n"
printf " __atomic_op${forder}(arch_try_${cmpxchg}, __VA_ARGS__)\n"
fi
if [ ! -z "${order}" ]; then
printf "#elif defined(arch_try_${cmpxchg})\n"
printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}\n"
fi
printf "#endif /* arch_try_${cmpxchg} */\n\n"
printf "#else\n"
gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
printf "#endif\n\n"
}
gen_try_cmpxchg_fallbacks()
{
local cmpxchg="$1"; shift;
for order in "" "_acquire" "_release" "_relaxed"; do
gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
gen_try_cmpxchg_order_fallback "${cmpxchg}" "${order}"
done
}
printf "#else /* arch_try_${cmpxchg}_relaxed */\n"
gen_order_fallbacks "arch_try_${cmpxchg}"
gen_cmpxchg_local_fallbacks()
{
local cmpxchg="$1"; shift
printf "#endif /* arch_try_${cmpxchg}_relaxed */\n\n"
printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
printf "#ifdef arch_try_${cmpxchg}\n"
printf "#define raw_try_${cmpxchg} arch_try_${cmpxchg}\n"
printf "#else\n"
gen_try_cmpxchg_fallback "${cmpxchg}" ""
printf "#endif\n\n"
}
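
As a sketch, for cmpxchg="cmpxchg_local" this helper would emit roughly the
following, with the #else branch generated by gen_try_cmpxchg_fallback()
above:

| #define raw_cmpxchg_local arch_cmpxchg_local
|
| #ifdef arch_try_cmpxchg_local
| #define raw_try_cmpxchg_local arch_try_cmpxchg_local
| #else
| #define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
| ({ \
| 	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
| 	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
| 	if (unlikely(___r != ___o)) \
| 		*___op = ___r; \
| 	likely(___r == ___o); \
| })
| #endif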
cat << EOF
@@ -217,7 +283,7 @@ cat << EOF
EOF
for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64" "arch_cmpxchg128"; do
for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128"; do
gen_xchg_fallbacks "${xchg}"
done
@@ -225,8 +291,12 @@ for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do
gen_try_cmpxchg_fallbacks "${cmpxchg}"
done
for cmpxchg in "cmpxchg_local" "cmpxchg64_local"; do
gen_try_cmpxchg_fallback "${cmpxchg}" ""
for cmpxchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local"; do
gen_cmpxchg_local_fallbacks "${cmpxchg}" ""
done
for cmpxchg in "sync_cmpxchg"; do
printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
done
grep '^[a-z]' "$1" | while read name meta args; do
......
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
ATOMICDIR=$(dirname $0)
. ${ATOMICDIR}/atomic-tbl.sh
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
gen_proto_order_variant()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local atomic="$1"; shift
local int="$1"; shift
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
cat <<EOF
static __always_inline ${ret}
raw_${atomicname}(${params})
{
${retstmt}arch_${atomicname}(${args});
}
EOF
}
gen_xchg()
{
local xchg="$1"; shift
local order="$1"; shift
cat <<EOF
#define raw_${xchg}${order}(...) \\
arch_${xchg}${order}(__VA_ARGS__)
EOF
}
cat << EOF
// SPDX-License-Identifier: GPL-2.0
// Generated by $0
// DO NOT MODIFY THIS FILE DIRECTLY
#ifndef _LINUX_ATOMIC_RAW_H
#define _LINUX_ATOMIC_RAW_H
EOF
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg "${xchg}" "${order}"
printf "\n"
done
done
for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
gen_xchg "${xchg}" ""
printf "\n"
done
cat <<EOF
#endif /* _LINUX_ATOMIC_RAW_H */
EOF
@@ -11,7 +11,6 @@ cat <<EOF |
gen-atomic-instrumented.sh linux/atomic/atomic-instrumented.h
gen-atomic-long.sh linux/atomic/atomic-long.h
gen-atomic-fallback.sh linux/atomic/atomic-arch-fallback.h
gen-atomic-raw.sh linux/atomic/atomic-raw.h
EOF
while read script header args; do
/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
......