locking/atomic: make atomic*_{cmp,}xchg optional
author Mark Rutland <mark.rutland@arm.com>
Mon, 5 Jun 2023 07:01:01 +0000 (08:01 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 5 Jun 2023 07:57:14 +0000 (09:57 +0200)
Most architectures define the atomic/atomic64 xchg and cmpxchg
operations in terms of arch_xchg and arch_cmpxchg respectively.

Add fallbacks for these and remove the trivial definitions from arch
code. On some architectures the existing definitions are kept, as they
are used to build other arch_atomic*() operations.
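
For example, an architecture which provides arch_xchg() but no
arch_atomic_xchg*() form now gets a generated fallback of this shape (a
sketch mirroring the generated atomic-arch-fallback.h hunk below):

static __always_inline int
arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg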

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-5-mark.rutland@arm.com
23 files changed:
arch/alpha/include/asm/atomic.h
arch/arc/include/asm/atomic.h
arch/arc/include/asm/atomic64-arcv2.h
arch/arm/include/asm/atomic.h
arch/arm64/include/asm/atomic.h
arch/csky/include/asm/atomic.h
arch/hexagon/include/asm/atomic.h
arch/ia64/include/asm/atomic.h
arch/loongarch/include/asm/atomic.h
arch/m68k/include/asm/atomic.h
arch/mips/include/asm/atomic.h
arch/openrisc/include/asm/atomic.h
arch/parisc/include/asm/atomic.h
arch/powerpc/include/asm/atomic.h
arch/riscv/include/asm/atomic.h
arch/sh/include/asm/atomic.h
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/atomic_64.h
arch/xtensa/include/asm/atomic.h
include/asm-generic/atomic.h
include/linux/atomic/atomic-arch-fallback.h
scripts/atomic/fallbacks/cmpxchg [new file with mode: 0644]
scripts/atomic/fallbacks/xchg [new file with mode: 0644]

index f2861a43a61efc4fd0cb1d971f1a186cec13ca16..ec8ab552c527a133a8f01fdca4810a89f7b52428 100644 (file)
@@ -200,16 +200,6 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define arch_atomic64_cmpxchg(v, old, new) \
-       (arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic64_xchg(v, new) \
-       (arch_xchg(&((v)->counter), new))
-
-#define arch_atomic_cmpxchg(v, old, new) \
-       (arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic_xchg(v, new) \
-       (arch_xchg(&((v)->counter), new))
-
 /**
  * arch_atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
index 52ee51e1ff7c27f795ec3fe17314a30878e7bb28..592d7fffc223c923f4d63ccf27ad01fb9dc56e49 100644 (file)
 #include <asm/atomic-spinlock.h>
 #endif
 
-#define arch_atomic_cmpxchg(v, o, n)                                   \
-({                                                                     \
-       arch_cmpxchg(&((v)->counter), (o), (n));                        \
-})
-
-#ifdef arch_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_relaxed(v, o, n)                           \
-({                                                                     \
-       arch_cmpxchg_relaxed(&((v)->counter), (o), (n));                \
-})
-#endif
-
-#define arch_atomic_xchg(v, n)                                         \
-({                                                                     \
-       arch_xchg(&((v)->counter), (n));                                \
-})
-
-#ifdef arch_xchg_relaxed
-#define arch_atomic_xchg_relaxed(v, n)                                 \
-({                                                                     \
-       arch_xchg_relaxed(&((v)->counter), (n));                        \
-})
-#endif
-
 /*
  * 64-bit atomics
  */
index c5a8010fdc97ddb0145dc02729aa37f74140a3cd..2b7c9e61a29475ce497b1a82685aceb83617448e 100644 (file)
@@ -159,6 +159,7 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 
        return prev;
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 {
@@ -179,6 +180,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 
        return prev;
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 /**
  * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
index db8512d9a918de2fd00cd1313a4f3a3761c16144..9458d47ff209c482539bce1fe095b41fc5d95f71 100644 (file)
@@ -210,6 +210,7 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 
        return ret;
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #define arch_atomic_fetch_andnot               arch_atomic_fetch_andnot
 
@@ -240,8 +241,6 @@ ATOMIC_OPS(xor, ^=, eor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
        s64 counter;
index c9979273d38981164bdfb6016760a9c77774f6aa..400d279e0f8d0237d69eddd42ca7d9b12f26fca0 100644 (file)
@@ -142,24 +142,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 #define arch_atomic_fetch_xor_release          arch_atomic_fetch_xor_release
 #define arch_atomic_fetch_xor                  arch_atomic_fetch_xor
 
-#define arch_atomic_xchg_relaxed(v, new) \
-       arch_xchg_relaxed(&((v)->counter), (new))
-#define arch_atomic_xchg_acquire(v, new) \
-       arch_xchg_acquire(&((v)->counter), (new))
-#define arch_atomic_xchg_release(v, new) \
-       arch_xchg_release(&((v)->counter), (new))
-#define arch_atomic_xchg(v, new) \
-       arch_xchg(&((v)->counter), (new))
-
-#define arch_atomic_cmpxchg_relaxed(v, old, new) \
-       arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define arch_atomic_cmpxchg_acquire(v, old, new) \
-       arch_cmpxchg_acquire(&((v)->counter), (old), (new))
-#define arch_atomic_cmpxchg_release(v, old, new) \
-       arch_cmpxchg_release(&((v)->counter), (old), (new))
-#define arch_atomic_cmpxchg(v, old, new) \
-       arch_cmpxchg(&((v)->counter), (old), (new))
-
 #define arch_atomic_andnot                     arch_atomic_andnot
 
 /*
@@ -209,16 +191,6 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 #define arch_atomic64_fetch_xor_release                arch_atomic64_fetch_xor_release
 #define arch_atomic64_fetch_xor                        arch_atomic64_fetch_xor
 
-#define arch_atomic64_xchg_relaxed             arch_atomic_xchg_relaxed
-#define arch_atomic64_xchg_acquire             arch_atomic_xchg_acquire
-#define arch_atomic64_xchg_release             arch_atomic_xchg_release
-#define arch_atomic64_xchg                     arch_atomic_xchg
-
-#define arch_atomic64_cmpxchg_relaxed          arch_atomic_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire          arch_atomic_cmpxchg_acquire
-#define arch_atomic64_cmpxchg_release          arch_atomic_cmpxchg_release
-#define arch_atomic64_cmpxchg                  arch_atomic_cmpxchg
-
 #define arch_atomic64_andnot                   arch_atomic64_andnot
 
 #define arch_atomic64_dec_if_positive          arch_atomic64_dec_if_positive
index 60406ef9c2bbcb573f7ad6f7b5b8bdbe4e55e000..4dab44f6143a5fe6f2fc10dae0f2d76284261839 100644 (file)
@@ -195,41 +195,6 @@ arch_atomic_dec_if_positive(atomic_t *v)
 }
 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
 
-#define ATOMIC_OP()                                                    \
-static __always_inline                                                 \
-int arch_atomic_xchg_relaxed(atomic_t *v, int n)                       \
-{                                                                      \
-       return __xchg_relaxed(n, &(v->counter), 4);                     \
-}                                                                      \
-static __always_inline                                                 \
-int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)             \
-{                                                                      \
-       return __cmpxchg_relaxed(&(v->counter), o, n, 4);               \
-}                                                                      \
-static __always_inline                                                 \
-int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)             \
-{                                                                      \
-       return __cmpxchg_acquire(&(v->counter), o, n, 4);               \
-}                                                                      \
-static __always_inline                                                 \
-int arch_atomic_cmpxchg(atomic_t *v, int o, int n)                     \
-{                                                                      \
-       return __cmpxchg(&(v->counter), o, n, 4);                       \
-}
-
-#define ATOMIC_OPS()                                                   \
-       ATOMIC_OP()
-
-ATOMIC_OPS()
-
-#define arch_atomic_xchg_relaxed       arch_atomic_xchg_relaxed
-#define arch_atomic_cmpxchg_relaxed    arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire    arch_atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg            arch_atomic_cmpxchg
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-
 #else
 #include <asm-generic/atomic.h>
 #endif
index 738857e10d6ec4902f6d12dc0a53a21e0cec8d47..ad6c111e9c10ff0e736aac8c775211ec0d504b85 100644 (file)
@@ -36,12 +36,6 @@ static inline void arch_atomic_set(atomic_t *v, int new)
  */
 #define arch_atomic_read(v)            READ_ONCE((v)->counter)
 
-#define arch_atomic_xchg(v, new)                                       \
-       (arch_xchg(&((v)->counter), (new)))
-
-#define arch_atomic_cmpxchg(v, old, new)                               \
-       (arch_cmpxchg(&((v)->counter), (old), (new)))
-
 #define ATOMIC_OP(op)                                                  \
 static inline void arch_atomic_##op(int i, atomic_t *v)                        \
 {                                                                      \
index 266c429b91372e9302d373ccbd00684df414d6e9..6540a628d2573cb536949aefa1eb8b1141e99c67 100644 (file)
@@ -207,13 +207,6 @@ ATOMIC64_FETCH_OP(xor, ^)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
-#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
-#define arch_atomic64_cmpxchg(v, old, new) \
-       (arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #define arch_atomic_add(i,v)           (void)arch_atomic_add_return((i), (v))
 #define arch_atomic_sub(i,v)           (void)arch_atomic_sub_return((i), (v))
 
index 6b9aca9ab6e9f284e64d57d1d43f61b97726c166..8d73c85911b08da40087252f0e55579dca9c3d5c 100644 (file)
@@ -181,9 +181,6 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
        return result;
 }
 
-#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
-
 /*
  * arch_atomic_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
@@ -342,10 +339,6 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
        return result;
 }
 
-#define arch_atomic64_cmpxchg(v, o, n) \
-       ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
-
 /*
  * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic64_t
index cfba83d230fdec37b34685e914be435e0eafec5d..190a032f19be7c4ed11dada7d8262a85ca0e8740 100644 (file)
@@ -158,12 +158,7 @@ static inline int arch_atomic_inc_and_test(atomic_t *v)
 }
 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
-#ifdef CONFIG_RMW_INSNS
-
-#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
-#else /* !CONFIG_RMW_INSNS */
+#ifndef CONFIG_RMW_INSNS
 
 static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -177,6 +172,7 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
        local_irq_restore(flags);
        return prev;
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
@@ -189,6 +185,7 @@ static inline int arch_atomic_xchg(atomic_t *v, int new)
        local_irq_restore(flags);
        return prev;
 }
+#define arch_atomic_xchg arch_atomic_xchg
 
 #endif /* !CONFIG_RMW_INSNS */
 
index 712fb5a6a5682b26d0e78a862796deecc4130b05..ba188e77768b28785fa5d453b9189ffd5a0627d2 100644 (file)
@@ -33,17 +33,6 @@ static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)     \
 {                                                                      \
        WRITE_ONCE(v->counter, i);                                      \
 }                                                                      \
-                                                                       \
-static __always_inline type                                            \
-arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)                       \
-{                                                                      \
-       return arch_cmpxchg(&v->counter, o, n);                         \
-}                                                                      \
-                                                                       \
-static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)      \
-{                                                                      \
-       return arch_xchg(&v->counter, n);                               \
-}
 
 ATOMIC_OPS(atomic, int)
 
index 326167e4783a9942770d0dd19bed922c7b051100..8ce67ec7c9a302b7b742a342704aa7c943f681b0 100644 (file)
@@ -130,7 +130,4 @@ static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 
 #include <asm/cmpxchg.h>
 
-#define arch_atomic_xchg(ptr, v)               (arch_xchg(&(ptr)->counter, (v)))
-#define arch_atomic_cmpxchg(v, old, new)       (arch_cmpxchg(&((v)->counter), (old), (new)))
-
 #endif /* __ASM_OPENRISC_ATOMIC_H */
index dd5a299ada6951fe486cea3f16b6a47f3a931428..0b3f64c92e3c0053a69fce9d08b3a5b28e507ed6 100644 (file)
@@ -73,10 +73,6 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
        return READ_ONCE((v)->counter);
 }
 
-/* exported interface */
-#define arch_atomic_cmpxchg(v, o, n)   (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_xchg(v, new)       (arch_xchg(&((v)->counter), new))
-
 #define ATOMIC_OP(op, c_op)                                            \
 static __inline__ void arch_atomic_##op(int i, atomic_t *v)            \
 {                                                                      \
@@ -218,11 +214,6 @@ arch_atomic64_read(const atomic64_t *v)
        return READ_ONCE((v)->counter);
 }
 
-/* exported interface */
-#define arch_atomic64_cmpxchg(v, o, n) \
-       ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #endif /* !CONFIG_64BIT */
 
 
index 47228b17747811160b7c5dcd3cf5e98a31cf8aba..5bf6a4d49268c702a67c42f15c4a750d12fa565b 100644 (file)
@@ -126,18 +126,6 @@ ATOMIC_OPS(xor, xor, "", K)
 #undef ATOMIC_OP_RETURN_RELAXED
 #undef ATOMIC_OP
 
-#define arch_atomic_cmpxchg(v, o, n) \
-       (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_cmpxchg_relaxed(v, o, n) \
-       arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define arch_atomic_cmpxchg_acquire(v, o, n) \
-       arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define arch_atomic_xchg(v, new) \
-       (arch_xchg(&((v)->counter), new))
-#define arch_atomic_xchg_relaxed(v, new) \
-       arch_xchg_relaxed(&((v)->counter), (new))
-
 /**
  * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
@@ -396,18 +384,6 @@ static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 }
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
-#define arch_atomic64_cmpxchg(v, o, n) \
-       (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
-       arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define arch_atomic64_cmpxchg_acquire(v, o, n) \
-       arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define arch_atomic64_xchg(v, new) \
-       (arch_xchg(&((v)->counter), new))
-#define arch_atomic64_xchg_relaxed(v, new) \
-       arch_xchg_relaxed(&((v)->counter), (new))
-
 /**
  * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
index bba472928b53932880f845625deff21683045fa8..f5dfef6c2153f189288d3a4f3fa7a1d5d1b5021b 100644 (file)
@@ -238,78 +238,6 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif
 
-/*
- * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a full barrier.
- */
-#define ATOMIC_OP(c_t, prefix, size)                                   \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)   \
-{                                                                      \
-       return __xchg_relaxed(&(v->counter), n, size);                  \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)   \
-{                                                                      \
-       return __xchg_acquire(&(v->counter), n, size);                  \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)   \
-{                                                                      \
-       return __xchg_release(&(v->counter), n, size);                  \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)           \
-{                                                                      \
-       return __arch_xchg(&(v->counter), n, size);                     \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,       \
-                                    c_t o, c_t n)                      \
-{                                                                      \
-       return __cmpxchg_relaxed(&(v->counter), o, n, size);            \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,       \
-                                    c_t o, c_t n)                      \
-{                                                                      \
-       return __cmpxchg_acquire(&(v->counter), o, n, size);            \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,       \
-                                    c_t o, c_t n)                      \
-{                                                                      \
-       return __cmpxchg_release(&(v->counter), o, n, size);            \
-}                                                                      \
-static __always_inline                                                 \
-c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
-{                                                                      \
-       return __cmpxchg(&(v->counter), o, n, size);                    \
-}
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS()                                                   \
-       ATOMIC_OP(int,   , 4)
-#else
-#define ATOMIC_OPS()                                                   \
-       ATOMIC_OP(int,   , 4)                                           \
-       ATOMIC_OP(s64, 64, 8)
-#endif
-
-ATOMIC_OPS()
-
-#define arch_atomic_xchg_relaxed       arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire       arch_atomic_xchg_acquire
-#define arch_atomic_xchg_release       arch_atomic_xchg_release
-#define arch_atomic_xchg               arch_atomic_xchg
-#define arch_atomic_cmpxchg_relaxed    arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire    arch_atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg_release    arch_atomic_cmpxchg_release
-#define arch_atomic_cmpxchg            arch_atomic_cmpxchg
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP
-
 static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
 {
        int prev, rc;
index 528bfeda78f56933160785f051671456b961108c..7a18cb2a1c1ac642d3f0bf714458cb055f119dd1 100644 (file)
@@ -30,9 +30,6 @@
 #include <asm/atomic-irq.h>
 #endif
 
-#define arch_atomic_xchg(v, new)       (arch_xchg(&((v)->counter), new))
-#define arch_atomic_cmpxchg(v, o, n)   (arch_cmpxchg(&((v)->counter), (o), (n)))
-
 #endif /* CONFIG_CPU_J2 */
 
 #endif /* __ASM_SH_ATOMIC_H */
index d775daa83d1291c5607b52b44a3fdd9051685fa6..1c9e6c7366e41a38e88919cd8ee497f748ea2e93 100644 (file)
@@ -24,7 +24,9 @@ int arch_atomic_fetch_and(int, atomic_t *);
 int arch_atomic_fetch_or(int, atomic_t *);
 int arch_atomic_fetch_xor(int, atomic_t *);
 int arch_atomic_cmpxchg(atomic_t *, int, int);
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 int arch_atomic_xchg(atomic_t *, int);
+#define arch_atomic_xchg arch_atomic_xchg
 int arch_atomic_fetch_add_unless(atomic_t *, int, int);
 void arch_atomic_set(atomic_t *, int);
 
index 077891686715a1f076c5d3db957e7912d9099fb8..df6a8b07d7e63cc06204ec19171bbe975e937bf9 100644 (file)
@@ -49,17 +49,6 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-
-static inline int arch_atomic_xchg(atomic_t *v, int new)
-{
-       return arch_xchg(&v->counter, new);
-}
-
-#define arch_atomic64_cmpxchg(v, o, n) \
-       ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 s64 arch_atomic64_dec_if_positive(atomic64_t *v);
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
index 52da614f953ce007b90173413baf7da9d91c281a..1d323a864002cb90f512d695322dad8e3c8ce931 100644 (file)
@@ -257,7 +257,4 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #endif /* _XTENSA_ATOMIC_H */
index e271d6708c876bd94d96b4bcc369f793c2e0b370..22142c71d35a102e186e40f9033cbb40325c8389 100644 (file)
@@ -130,7 +130,4 @@ ATOMIC_OP(xor, ^)
 #define arch_atomic_read(v)                    READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i)                  WRITE_ONCE(((v)->counter), (i))
 
-#define arch_atomic_xchg(ptr, v)               (arch_xchg(&(ptr)->counter, (u32)(v)))
-#define arch_atomic_cmpxchg(v, old, new)       (arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
-
 #endif /* __ASM_GENERIC_ATOMIC_H */
index 3ce4cb5e790c5efa2798f3692867091bc3c241df..1a2d81dbc2e488d2b705a2d7b67bab5a15baf4e2 100644 (file)
@@ -1091,9 +1091,48 @@ arch_atomic_fetch_xor(int i, atomic_t *v)
 #endif /* arch_atomic_fetch_xor_relaxed */
 
 #ifndef arch_atomic_xchg_relaxed
+#ifdef arch_atomic_xchg
 #define arch_atomic_xchg_acquire arch_atomic_xchg
 #define arch_atomic_xchg_release arch_atomic_xchg
 #define arch_atomic_xchg_relaxed arch_atomic_xchg
+#endif /* arch_atomic_xchg */
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int new)
+{
+       return arch_xchg(&v->counter, new);
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int new)
+{
+       return arch_xchg_acquire(&v->counter, new);
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int new)
+{
+       return arch_xchg_release(&v->counter, new);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg_relaxed
+static __always_inline int
+arch_atomic_xchg_relaxed(atomic_t *v, int new)
+{
+       return arch_xchg_relaxed(&v->counter, new);
+}
+#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
+#endif
+
 #else /* arch_atomic_xchg_relaxed */
 
 #ifndef arch_atomic_xchg_acquire
@@ -1133,9 +1172,48 @@ arch_atomic_xchg(atomic_t *v, int i)
 #endif /* arch_atomic_xchg_relaxed */
 
 #ifndef arch_atomic_cmpxchg_relaxed
+#ifdef arch_atomic_cmpxchg
 #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
 #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
 #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#endif /* arch_atomic_cmpxchg */
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       return arch_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       return arch_cmpxchg_acquire(&v->counter, old, new);
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       return arch_cmpxchg_release(&v->counter, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg_relaxed
+static __always_inline int
+arch_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+       return arch_cmpxchg_relaxed(&v->counter, old, new);
+}
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#endif
+
 #else /* arch_atomic_cmpxchg_relaxed */
 
 #ifndef arch_atomic_cmpxchg_acquire
@@ -2225,9 +2303,48 @@ arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 #endif /* arch_atomic64_fetch_xor_relaxed */
 
 #ifndef arch_atomic64_xchg_relaxed
+#ifdef arch_atomic64_xchg
 #define arch_atomic64_xchg_acquire arch_atomic64_xchg
 #define arch_atomic64_xchg_release arch_atomic64_xchg
 #define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#endif /* arch_atomic64_xchg */
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 new)
+{
+       return arch_xchg(&v->counter, new);
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+       return arch_xchg_acquire(&v->counter, new);
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+       return arch_xchg_release(&v->counter, new);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg_relaxed
+static __always_inline s64
+arch_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+       return arch_xchg_relaxed(&v->counter, new);
+}
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
+#endif
+
 #else /* arch_atomic64_xchg_relaxed */
 
 #ifndef arch_atomic64_xchg_acquire
@@ -2267,9 +2384,48 @@ arch_atomic64_xchg(atomic64_t *v, s64 i)
 #endif /* arch_atomic64_xchg_relaxed */
 
 #ifndef arch_atomic64_cmpxchg_relaxed
+#ifdef arch_atomic64_cmpxchg
 #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
 #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
 #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#endif /* arch_atomic64_cmpxchg */
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+       return arch_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+       return arch_cmpxchg_acquire(&v->counter, old, new);
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+       return arch_cmpxchg_release(&v->counter, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+static __always_inline s64
+arch_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+       return arch_cmpxchg_relaxed(&v->counter, old, new);
+}
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
+#endif
+
 #else /* arch_atomic64_cmpxchg_relaxed */
 
 #ifndef arch_atomic64_cmpxchg_acquire
@@ -2597,4 +2753,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
 #endif
 
 #endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 9f0fd6ed53267c6ec64e36cd18e6fd8df57ea277
+// e1cee558cc61cae887890db30fcdf93baca9f498
diff --git a/scripts/atomic/fallbacks/cmpxchg b/scripts/atomic/fallbacks/cmpxchg
new file mode 100644 (file)
index 0000000..87cd010
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static __always_inline ${int}
+arch_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
+{
+       return arch_cmpxchg${order}(&v->counter, old, new);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/xchg b/scripts/atomic/fallbacks/xchg
new file mode 100644 (file)
index 0000000..733b898
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static __always_inline ${int}
+arch_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
+{
+       return arch_xchg${order}(&v->counter, new);
+}
+EOF
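
For reference, scripts/atomic/gen-atomic-fallback.sh substitutes ${atomic},
${int} and ${order} when instantiating these templates; e.g. the atomic64
release-ordered expansion of the xchg template above (matching the generated
header earlier in this diff) is:

static __always_inline s64
arch_atomic64_xchg_release(atomic64_t *v, s64 new)
{
	return arch_xchg_release(&v->counter, new);
}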