Commit 9257959a authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: scripts: restructure fallback ifdeffery

Currently the various ordering variants of an atomic operation are
defined in groups of full/acquire/release/relaxed variants that share a
block of ifdeffery, with several potential definitions of each variant
in different branches of that shared ifdeffery.

As an ordering variant can have several potential definitions down
different branches of the shared ifdeffery, it can be painful for a
human to find a relevant definition, and we don't have a good location
to place anything common to all definitions of an ordering variant (e.g.
kerneldoc).

Historically the grouping of full/acquire/release/relaxed ordering
variants was necessary as we filled in the missing atomics in the same
namespace as the architecture used. It would be easy to accidentally
define one ordering fallback in terms of another ordering fallback with
redundant barriers, and avoiding that would otherwise require a lot of
baroque ifdeffery.
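
For illustration, the current grouped ifdeffery is roughly of the form
below; this is a simplified sketch rather than the exact generated
code (it omits the handling for architectures which provide no
arch_atomic_fetch_andnot() at all):

| #ifndef arch_atomic_fetch_andnot_relaxed
| #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
| #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
| #else /* arch_atomic_fetch_andnot_relaxed */
|
| #ifndef arch_atomic_fetch_andnot_acquire
| static __always_inline int
| arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #endif
|
| /* ... release/relaxed variants handled similarly ... */
|
| #endif /* arch_atomic_fetch_andnot_relaxed */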

With recent changes we no longer need to fill in the missing atomics in
the arch_atomic*_<op>() namespace, and only need to fill in the
raw_atomic*_<op>() namespace. Due to this, there's no risk of a
namespace collision, and we can define each raw_atomic*_<op> ordering
variant with its own ifdeffery checking for the arch_atomic*_<op>
ordering variants.

Restructure the fallbacks in this way, with each ordering variant having
its own ifdeffery of the form:

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Note that where there's no relevant arch_atomic*_<op>() ordering
variant, we define the operation in terms of a distinct
raw_atomic*_<otherop>() (as in the #else branch above, which falls back
to raw_atomic_fetch_and_acquire()), since that operation might itself
have been filled in with a fallback.

As we now generate the raw_atomic*_<op>() implementations directly, we
no longer need the trivial wrappers, so they are removed.
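
For example, atomic-raw.h previously contained wrappers of the rough
form below (see the removed gen-atomic-raw.sh in the diff), which are
no longer generated:

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| }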

This makes the ifdeffery easier to follow, and will allow for further
improvements in subsequent patches.

There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
parent 1815da17
@@ -78,7 +78,6 @@
 })
 #include <linux/atomic/atomic-arch-fallback.h>
-#include <linux/atomic/atomic-raw.h>
 #include <linux/atomic/atomic-long.h>
 #include <linux/atomic/atomic-instrumented.h>
......
This source diff could not be displayed because it is too large.
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
+raw_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_acquire_fence();
......
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_add_return${order}(i, v) < 0;
+	return raw_${atomic}_add_return${order}(i, v) < 0;
 }
 EOF
 cat << EOF
 static __always_inline bool
-arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	return arch_${atomic}_fetch_add_unless(v, a, u) != u;
+	return raw_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
+	${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
 cat <<EOF
 static __always_inline ${int}
-arch_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
+raw_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
 {
-	return arch_cmpxchg${order}(&v->counter, old, new);
+	return raw_cmpxchg${order}(&v->counter, old, new);
 }
 EOF
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+raw_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
+	${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_dec_and_test(${atomic}_t *v)
+raw_${atomic}_dec_and_test(${atomic}_t *v)
 {
-	return arch_${atomic}_dec_return(v) == 0;
+	return raw_${atomic}_dec_return(v) == 0;
 }
 EOF
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_dec_if_positive(${atomic}_t *v)
+raw_${atomic}_dec_if_positive(${atomic}_t *v)
 {
-	${int} dec, c = arch_${atomic}_read(v);
+	${int} dec, c = raw_${atomic}_read(v);
 	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));
 	return dec;
 }
......
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_dec_unless_positive(${atomic}_t *v)
+raw_${atomic}_dec_unless_positive(${atomic}_t *v)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 	do {
 		if (unlikely(c > 0))
 			return false;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));
 	return true;
 }
......
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}(${params})
+raw_${atomic}_${pfx}${name}${sfx}(${params})
 {
 	${ret} ret;
 	__atomic_pre_full_fence();
......
 cat << EOF
 static __always_inline ${int}
-arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 	do {
 		if (unlikely(c == u))
 			break;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));
 	return c;
 }
......
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+raw_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
+	${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v);
 }
 EOF
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_and_test(${atomic}_t *v)
+raw_${atomic}_inc_and_test(${atomic}_t *v)
 {
-	return arch_${atomic}_inc_return(v) == 0;
+	return raw_${atomic}_inc_return(v) == 0;
 }
 EOF
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_not_zero(${atomic}_t *v)
+raw_${atomic}_inc_not_zero(${atomic}_t *v)
 {
-	return arch_${atomic}_add_unless(v, 1, 0);
+	return raw_${atomic}_add_unless(v, 1, 0);
 }
 EOF
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_unless_negative(${atomic}_t *v)
+raw_${atomic}_inc_unless_negative(${atomic}_t *v)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 	do {
 		if (unlikely(c < 0))
 			return false;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));
 	return true;
 }
......
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_read_acquire(const ${atomic}_t *v)
+raw_${atomic}_read_acquire(const ${atomic}_t *v)
 {
 	${int} ret;
 	if (__native_word(${atomic}_t)) {
 		ret = smp_load_acquire(&(v)->counter);
 	} else {
-		ret = arch_${atomic}_read(v);
+		ret = raw_${atomic}_read(v);
 		__atomic_acquire_fence();
 	}
......
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_release(${params})
+raw_${atomic}_${pfx}${name}${sfx}_release(${params})
 {
 	__atomic_release_fence();
 	${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
......
 cat <<EOF
 static __always_inline void
-arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
+raw_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
 	if (__native_word(${atomic}_t)) {
 		smp_store_release(&(v)->counter, i);
 	} else {
 		__atomic_release_fence();
-		arch_${atomic}_set(v, i);
+		raw_${atomic}_set(v, i);
 	}
 }
 EOF
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+raw_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_sub_return(i, v) == 0;
+	return raw_${atomic}_sub_return(i, v) == 0;
 }
 EOF
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+raw_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
 	${int} r, o = *old;
-	r = arch_${atomic}_cmpxchg${order}(v, o, new);
+	r = raw_${atomic}_cmpxchg${order}(v, o, new);
 	if (unlikely(r != o))
 		*old = r;
 	return likely(r == o);
......
 cat <<EOF
 static __always_inline ${int}
-arch_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
+raw_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
 {
-	return arch_xchg${order}(&v->counter, new);
+	return raw_xchg${order}(&v->counter, new);
 }
 EOF
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
ATOMICDIR=$(dirname $0)
. ${ATOMICDIR}/atomic-tbl.sh
#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
gen_proto_order_variant()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
local atomic="$1"; shift
local int="$1"; shift
local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local params="$(gen_params "${int}" "${atomic}" "$@")"
local args="$(gen_args "$@")"
local retstmt="$(gen_ret_stmt "${meta}")"
cat <<EOF
static __always_inline ${ret}
raw_${atomicname}(${params})
{
${retstmt}arch_${atomicname}(${args});
}
EOF
}
gen_xchg()
{
local xchg="$1"; shift
local order="$1"; shift
cat <<EOF
#define raw_${xchg}${order}(...) \\
arch_${xchg}${order}(__VA_ARGS__)
EOF
}
cat << EOF
// SPDX-License-Identifier: GPL-2.0
// Generated by $0
// DO NOT MODIFY THIS FILE DIRECTLY
#ifndef _LINUX_ATOMIC_RAW_H
#define _LINUX_ATOMIC_RAW_H
EOF
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg "${xchg}" "${order}"
printf "\n"
done
done
for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
gen_xchg "${xchg}" ""
printf "\n"
done
cat <<EOF
#endif /* _LINUX_ATOMIC_RAW_H */
EOF
@@ -11,7 +11,6 @@ cat <<EOF |
 gen-atomic-instrumented.sh linux/atomic/atomic-instrumented.h
 gen-atomic-long.sh linux/atomic/atomic-long.h
 gen-atomic-fallback.sh linux/atomic/atomic-arch-fallback.h
-gen-atomic-raw.sh linux/atomic/atomic-raw.h
 EOF
 while read script header args; do
 	/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
......