locking/atomic: remove ARCH_ATOMIC remnants
author    Mark Rutland <mark.rutland@arm.com>
          Tue, 13 Jul 2021 10:52:50 +0000 (11:52 +0100)
committer Peter Zijlstra <peterz@infradead.org>
          Fri, 16 Jul 2021 16:46:44 +0000 (18:46 +0200)
Now that gen-atomic-fallback.sh is only used to generate the arch_*
fallbacks, we no longer need to generate the non-arch_* forms as well,
and can remove the infrastructure this required.

There is no change to any of the generated headers as a result of this
patch.
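
For reference, the generated output keeps its existing shape; e.g. the
inc template still expands for atomic_t along the following lines in
include/linux/atomic-arch-fallback.h (an illustrative sketch, not a
verbatim excerpt of the generated header):

  #ifndef arch_atomic_inc
  static __always_inline void
  arch_atomic_inc(atomic_t *v)
  {
          arch_atomic_add(1, v);
  }
  #define arch_atomic_inc arch_atomic_inc
  #endif

The non-arch_* wrappers are generated separately by
gen-atomic-instrumented.sh, so nothing is lost by dropping the ${arch}
parameterisation here.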

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-3-mark.rutland@arm.com
21 files changed:
scripts/atomic/fallbacks/acquire
scripts/atomic/fallbacks/add_negative
scripts/atomic/fallbacks/add_unless
scripts/atomic/fallbacks/andnot
scripts/atomic/fallbacks/dec
scripts/atomic/fallbacks/dec_and_test
scripts/atomic/fallbacks/dec_if_positive
scripts/atomic/fallbacks/dec_unless_positive
scripts/atomic/fallbacks/fence
scripts/atomic/fallbacks/fetch_add_unless
scripts/atomic/fallbacks/inc
scripts/atomic/fallbacks/inc_and_test
scripts/atomic/fallbacks/inc_not_zero
scripts/atomic/fallbacks/inc_unless_negative
scripts/atomic/fallbacks/read_acquire
scripts/atomic/fallbacks/release
scripts/atomic/fallbacks/set_release
scripts/atomic/fallbacks/sub_and_test
scripts/atomic/fallbacks/try_cmpxchg
scripts/atomic/gen-atomic-fallback.sh
scripts/atomic/gen-atomics.sh

diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
index 59c00529dc7cf65d0f3cfe0aca8c8ddf5bdbc576..ef764085c79aa290ec537ce3771abfef6761c309 100755 (executable)
@@ -1,8 +1,8 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
+arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
-       ${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
        __atomic_acquire_fence();
        return ret;
 }
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative
index a66635bceefbf3dc821f261970aec7c837f07846..15caa2eb237128a3f1bcad00694c0e1ada8b0b00 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_add_negative - add and test if negative
+ * arch_${atomic}_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * result is greater than or equal to zero.
  */
 static __always_inline bool
-${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
+arch_${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
-       return ${arch}${atomic}_add_return(i, v) < 0;
+       return arch_${atomic}_add_return(i, v) < 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index 2ff598a3f9ecebaea65eaefe22b0f66c1acc0d65..9e5159c2ccfc859ee5e2e56c05ad19bceb5709a0 100755 (executable)
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_add_unless - add unless the number is already a given value
+ * arch_${atomic}_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
  * Returns true if the addition was done.
  */
 static __always_inline bool
-${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-       return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
+       return arch_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot
index 3f18663dcefb45939aa4a3b4ea4eca8735622c2f..5a42f54a3595032881edcc91af78d081ce2e82cf 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
+       ${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec
index e2e01f0574bbc662e40d3796821341beb9ef6681..8c144c818e9edadee83fe536c8f3e89bb9d2024a 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
+       ${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test
index e8a5e492eb5fbc5e7415d2102bef079b97e88c32..8549f359bd0ef9a0323f4bb357f92d420062d586 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_dec_and_test - decrement and test
+ * arch_${atomic}_dec_and_test - decrement and test
  * @v: pointer of type ${atomic}_t
  *
  * Atomically decrements @v by 1 and
@@ -8,8 +8,8 @@ cat <<EOF
  * cases.
  */
 static __always_inline bool
-${arch}${atomic}_dec_and_test(${atomic}_t *v)
+arch_${atomic}_dec_and_test(${atomic}_t *v)
 {
-       return ${arch}${atomic}_dec_return(v) == 0;
+       return arch_${atomic}_dec_return(v) == 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
index 527adec89c3783dc55416720dd8665dd1531ae3b..86bdced3428d6fa467de40e5bf398b1ec7a74f43 100755 (executable)
@@ -1,14 +1,14 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_dec_if_positive(${atomic}_t *v)
+arch_${atomic}_dec_if_positive(${atomic}_t *v)
 {
-       ${int} dec, c = ${arch}${atomic}_read(v);
+       ${int} dec, c = arch_${atomic}_read(v);
 
        do {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
 
        return dec;
 }
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
index dcab6848ca1e5a2535c0a144cf7cb4af4d8a3e52..c531d5afecc476cef51b8a3eff2f36c461d0907c 100755 (executable)
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
+arch_${atomic}_dec_unless_positive(${atomic}_t *v)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c > 0))
                        return false;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
 
        return true;
 }
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
index 3764fc8ce945d79cd210406662bbf222c58b69c9..07757d8e338ef5f1fcb81a630e97650d3bce5811 100755 (executable)
@@ -1,10 +1,10 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}(${params})
+arch_${atomic}_${pfx}${name}${sfx}(${params})
 {
        ${ret} ret;
        __atomic_pre_full_fence();
-       ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
        __atomic_post_full_fence();
        return ret;
 }
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index 0e0b9aef151530a622cf97dbf6c4b88ee56cde16..68ce13c8b9dad4f223400951eee36d23aa32ee61 100755 (executable)
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
+ * arch_${atomic}_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
  * Returns original value of @v
  */
 static __always_inline ${int}
-${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c == u))
                        break;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
 
        return c;
 }
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc
index 15ec62946e8ce52f77ea91d36a55ffe0cbec403a..3c2c3739169e51a514035b968365a35490687937 100755 (executable)
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
-       ${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
+       ${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test
index cecc8322a21ff1f912adde71c4018383698c9580..0cf23fe1efb8543ea3320dbcbeba8e3c5884784f 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_inc_and_test - increment and test
+ * arch_${atomic}_inc_and_test - increment and test
  * @v: pointer of type ${atomic}_t
  *
  * Atomically increments @v by 1
@@ -8,8 +8,8 @@ cat <<EOF
  * other cases.
  */
 static __always_inline bool
-${arch}${atomic}_inc_and_test(${atomic}_t *v)
+arch_${atomic}_inc_and_test(${atomic}_t *v)
 {
-       return ${arch}${atomic}_inc_return(v) == 0;
+       return arch_${atomic}_inc_return(v) == 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero
index 50f2d4d48279c3f2c329591860e851054988378f..ed8a1f5626675297ef9735b501a3321a0dad350f 100755 (executable)
@@ -1,14 +1,14 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_inc_not_zero - increment unless the number is zero
+ * arch_${atomic}_inc_not_zero - increment unless the number is zero
  * @v: pointer of type ${atomic}_t
  *
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
 static __always_inline bool
-${arch}${atomic}_inc_not_zero(${atomic}_t *v)
+arch_${atomic}_inc_not_zero(${atomic}_t *v)
 {
-       return ${arch}${atomic}_add_unless(v, 1, 0);
+       return arch_${atomic}_add_unless(v, 1, 0);
 }
 EOF
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
index 87629e0d4a80d9fc001a813ce8f6772f3877faa3..95d8ce48233ff4569bd67dab82c655c8ae1c6db2 100755 (executable)
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
+arch_${atomic}_inc_unless_negative(${atomic}_t *v)
 {
-       ${int} c = ${arch}${atomic}_read(v);
+       ${int} c = arch_${atomic}_read(v);
 
        do {
                if (unlikely(c < 0))
                        return false;
-       } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
+       } while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
 
        return true;
 }
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
index 341a88dccaa7e3e58a4ff6615acd7eff451bd30d..803ba75610766e0be35e1fcc7e4fb72f00c4e560 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_read_acquire(const ${atomic}_t *v)
+arch_${atomic}_read_acquire(const ${atomic}_t *v)
 {
        return smp_load_acquire(&(v)->counter);
 }
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release
index f8906d537c0f460518e526200237803464763d44..b46feb56d69caad27d92b320ff764423d0a1683d 100755 (executable)
@@ -1,8 +1,8 @@
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
+arch_${atomic}_${pfx}${name}${sfx}_release(${params})
 {
        __atomic_release_fence();
-       ${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       ${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 }
 EOF
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
index 76068272d5f52f5a8e7ff72b8ef587bb24a707dd..86ede759f24eacdb546bf58a8fe9565e7028e082 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline void
-${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
+arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
        smp_store_release(&(v)->counter, i);
 }
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test
index c580f4c2136e5488cc46793c3fa092f28e6930f4..260f37341c8881f798d98d3b9c312c63eeb52f14 100755 (executable)
@@ -1,6 +1,6 @@
 cat <<EOF
 /**
- * ${arch}${atomic}_sub_and_test - subtract value from variable and test result
+ * arch_${atomic}_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * other cases.
  */
 static __always_inline bool
-${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
-       return ${arch}${atomic}_sub_return(i, v) == 0;
+       return arch_${atomic}_sub_return(i, v) == 0;
 }
 EOF
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg
index 06db0f738e45eb44e5adb56da5852b27d3afedab..890f850ede378512c89a50a7c79e4358051abd15 100755 (executable)
@@ -1,9 +1,9 @@
 cat <<EOF
 static __always_inline bool
-${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
        ${int} r, o = *old;
-       r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
+       r = arch_${atomic}_cmpxchg${order}(v, o, new);
        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index 2601ff4f9468c9896d9b3014ec7e18b7efc9dde8..8e2da71f1d5fa7548b0ba96af14571193d2ae052 100755 (executable)
@@ -2,11 +2,10 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ATOMICDIR=$(dirname $0)
-ARCH=$2
 
 . ${ATOMICDIR}/atomic-tbl.sh
 
-#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
 gen_template_fallback()
 {
        local template="$1"; shift
@@ -15,11 +14,10 @@ gen_template_fallback()
        local name="$1"; shift
        local sfx="$1"; shift
        local order="$1"; shift
-       local arch="$1"; shift
        local atomic="$1"; shift
        local int="$1"; shift
 
-       local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
+       local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
 
        local ret="$(gen_ret_type "${meta}" "${int}")"
        local retstmt="$(gen_ret_stmt "${meta}")"
@@ -34,7 +32,7 @@ gen_template_fallback()
        fi
 }
 
-#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
+#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
 gen_proto_fallback()
 {
        local meta="$1"; shift
@@ -65,44 +63,26 @@ gen_proto_order_variant()
        local name="$1"; shift
        local sfx="$1"; shift
        local order="$1"; shift
-       local arch="$1"
-       local atomic="$2"
+       local atomic="$1"
 
-       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+       local basename="arch_${atomic}_${pfx}${name}${sfx}"
 
-       printf "#define arch_${basename}${order} ${basename}${order}\n"
+       printf "#define ${basename}${order} ${basename}${order}\n"
 }
 
-#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
+#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
 gen_proto_order_variants()
 {
        local meta="$1"; shift
        local pfx="$1"; shift
        local name="$1"; shift
        local sfx="$1"; shift
-       local arch="$1"
-       local atomic="$2"
+       local atomic="$1"
 
-       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+       local basename="arch_${atomic}_${pfx}${name}${sfx}"
 
        local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
 
-       if [ -z "$arch" ]; then
-               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-
-               if meta_has_acquire "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-               fi
-               if meta_has_release "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-               fi
-               if meta_has_relaxed "${meta}"; then
-                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
-               fi
-
-               echo ""
-       fi
-
        # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
        # read_acquire and set_release need to be templated, though
        if ! meta_has_relaxed "${meta}"; then
@@ -187,38 +167,38 @@ gen_try_cmpxchg_fallback()
        local order="$1"; shift;
 
 cat <<EOF
-#ifndef ${ARCH}try_cmpxchg${order}
-#define ${ARCH}try_cmpxchg${order}(_ptr, _oldp, _new) \\
+#ifndef arch_try_cmpxchg${order}
+#define arch_try_cmpxchg${order}(_ptr, _oldp, _new) \\
 ({ \\
        typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
-       ___r = ${ARCH}cmpxchg${order}((_ptr), ___o, (_new)); \\
+       ___r = arch_cmpxchg${order}((_ptr), ___o, (_new)); \\
        if (unlikely(___r != ___o)) \\
                *___op = ___r; \\
        likely(___r == ___o); \\
 })
-#endif /* ${ARCH}try_cmpxchg${order} */
+#endif /* arch_try_cmpxchg${order} */
 
 EOF
 }
 
 gen_try_cmpxchg_fallbacks()
 {
-       printf "#ifndef ${ARCH}try_cmpxchg_relaxed\n"
-       printf "#ifdef ${ARCH}try_cmpxchg\n"
+       printf "#ifndef arch_try_cmpxchg_relaxed\n"
+       printf "#ifdef arch_try_cmpxchg\n"
 
-       gen_basic_fallbacks "${ARCH}try_cmpxchg"
+       gen_basic_fallbacks "arch_try_cmpxchg"
 
-       printf "#endif /* ${ARCH}try_cmpxchg */\n\n"
+       printf "#endif /* arch_try_cmpxchg */\n\n"
 
        for order in "" "_acquire" "_release" "_relaxed"; do
                gen_try_cmpxchg_fallback "${order}"
        done
 
-       printf "#else /* ${ARCH}try_cmpxchg_relaxed */\n"
+       printf "#else /* arch_try_cmpxchg_relaxed */\n"
 
-       gen_order_fallbacks "${ARCH}try_cmpxchg"
+       gen_order_fallbacks "arch_try_cmpxchg"
 
-       printf "#endif /* ${ARCH}try_cmpxchg_relaxed */\n\n"
+       printf "#endif /* arch_try_cmpxchg_relaxed */\n\n"
 }
 
 cat << EOF
@@ -234,14 +214,14 @@ cat << EOF
 
 EOF
 
-for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
+for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
        gen_xchg_fallbacks "${xchg}"
 done
 
 gen_try_cmpxchg_fallbacks
 
 grep '^[a-z]' "$1" | while read name meta args; do
-       gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
+       gen_proto "${meta}" "${name}" "atomic" "int" ${args}
 done
 
 cat <<EOF
@@ -252,7 +232,7 @@ cat <<EOF
 EOF
 
 grep '^[a-z]' "$1" | while read name meta args; do
-       gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
+       gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
 done
 
 cat <<EOF
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
index f776a574224d3b7d01ff3872029a6eb973abde3b..56b119f7d1c2b0822035595856988947fecbc3a0 100755 (executable)
@@ -10,7 +10,7 @@ LINUXDIR=${ATOMICDIR}/../..
 cat <<EOF |
 gen-atomic-instrumented.sh      asm-generic/atomic-instrumented.h
 gen-atomic-long.sh              asm-generic/atomic-long.h
-gen-atomic-fallback.sh          linux/atomic-arch-fallback.h           arch_
+gen-atomic-fallback.sh          linux/atomic-arch-fallback.h
 EOF
 while read script header args; do
        /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}