locking/atomic: mips: move to ARCH_ATOMIC
author    Mark Rutland <mark.rutland@arm.com>
          Tue, 25 May 2021 14:02:21 +0000 (15:02 +0100)
committer Peter Zijlstra <peterz@infradead.org>
          Wed, 26 May 2021 11:20:51 +0000 (13:20 +0200)
We'd like all architectures to convert to ARCH_ATOMIC: once they all
have, it will be possible to make significant cleanups to the atomics
headers, and it will become much easier to enable atomic functionality
generically (e.g. debug logic in the instrumented wrappers).

As a step towards that, this patch migrates mips to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
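
For illustration, a minimal sketch of that wrapping scheme (modelled on
the generated asm-generic/atomic-instrumented.h wrappers, not part of
the MIPS patch itself): the architecture supplies arch_atomic_add(),
and the common layer adds the instrumentation hook before deferring to
it.

	/*
	 * Sketch only: with ARCH_ATOMIC selected, the regular atomic_add()
	 * is provided by common code (the instrument_* hooks come from
	 * <linux/instrumented.h>), wrapping the arch implementation.
	 */
	static __always_inline void
	atomic_add(int i, atomic_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		arch_atomic_add(i, v);
	}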

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-23-mark.rutland@arm.com
arch/mips/Kconfig
arch/mips/include/asm/atomic.h
arch/mips/include/asm/cmpxchg.h
arch/mips/kernel/cmpxchg.c

index ed51970c08e75c683e1872261cd2152f4546e101..55b4da96872f9e8569e7df19a37248f76196f9f3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3,6 +3,7 @@ config MIPS
        bool
        default y
        select ARCH_32BIT_OFF_T if !64BIT
+       select ARCH_ATOMIC
        select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
        select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
        select ARCH_HAS_FORTIFY_SOURCE
index 27ad767915390c5ce914cad99c3755f22b9b7bdc..95e1f7f3597f45f7931f27fa3ab5ff8f9a160c8e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
 #include <asm/war.h>
 
 #define ATOMIC_OPS(pfx, type)                                          \
-static __always_inline type pfx##_read(const pfx##_t *v)               \
+static __always_inline type arch_##pfx##_read(const pfx##_t *v)                \
 {                                                                      \
        return READ_ONCE(v->counter);                                   \
 }                                                                      \
                                                                        \
-static __always_inline void pfx##_set(pfx##_t *v, type i)              \
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)       \
 {                                                                      \
        WRITE_ONCE(v->counter, i);                                      \
 }                                                                      \
                                                                        \
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)  \
+static __always_inline type                                            \
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)                       \
 {                                                                      \
-       return cmpxchg(&v->counter, o, n);                              \
+       return arch_cmpxchg(&v->counter, o, n);                         \
 }                                                                      \
                                                                        \
-static __always_inline type pfx##_xchg(pfx##_t *v, type n)             \
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)      \
 {                                                                      \
-       return xchg(&v->counter, n);                                    \
+       return arch_xchg(&v->counter, n);                               \
 }
 
 ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
 #endif
 
 #define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                 \
-static __inline__ void pfx##_##op(type i, pfx##_t * v)                 \
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)          \
 {                                                                      \
        type temp;                                                      \
                                                                        \
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v)                        \
 }
 
 #define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)          \
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)        \
+static __inline__ type                                                 \
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)                        \
 {                                                                      \
        type temp, result;                                              \
                                                                        \
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)     \
 }
 
 #define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)           \
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+static __inline__ type                                                 \
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)                 \
 {                                                                      \
        int temp, result;                                               \
                                                                        \
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)    \
 ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
 ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
 
-#define atomic_add_return_relaxed      atomic_add_return_relaxed
-#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed  arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed  arch_atomic_fetch_sub_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
 ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed   atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed   atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed    atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed    atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed      arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed      arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed       arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed       arch_atomic64_fetch_sub_relaxed
 #endif /* CONFIG_64BIT */
 
 #undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
 ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
 ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
 
-#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed  arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed   arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed  arch_atomic_fetch_xor_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
 ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
 ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed    atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed     atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed    atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed       arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed                arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed       arch_atomic64_fetch_xor_relaxed
 #endif
 
 #undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
  * The function returns the old value of @v minus @i.
  */
 #define ATOMIC_SIP_OP(pfx, type, op, ll, sc)                           \
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)       \
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v)        \
 {                                                                      \
        type temp, result;                                              \
                                                                        \
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)  \
 }
 
 ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v)      atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
 
 #ifdef CONFIG_64BIT
 ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v)    atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v)       arch_atomic64_sub_if_positive(1, v)
 #endif
 
 #undef ATOMIC_SIP_OP
index c7e0455d4d46203935c31ef778f9fa1d9f90a5db..0b983800f48b7cafcf874f863ab8c55a30bc8a10 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
        }
 }
 
-#define xchg(ptr, x)                                                   \
+#define arch_xchg(ptr, x)                                              \
 ({                                                                     \
        __typeof__(*(ptr)) __res;                                       \
                                                                        \
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        }
 }
 
-#define cmpxchg_local(ptr, old, new)                                   \
+#define arch_cmpxchg_local(ptr, old, new)                              \
        ((__typeof__(*(ptr)))                                           \
                __cmpxchg((ptr),                                        \
                          (unsigned long)(__typeof__(*(ptr)))(old),     \
                          (unsigned long)(__typeof__(*(ptr)))(new),     \
                          sizeof(*(ptr))))
 
-#define cmpxchg(ptr, old, new)                                         \
+#define arch_cmpxchg(ptr, old, new)                                    \
 ({                                                                     \
        __typeof__(*(ptr)) __res;                                       \
                                                                        \
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        if (__SYNC_loongson3_war == 0)                                  \
                smp_mb__before_llsc();                                  \
                                                                        \
-       __res = cmpxchg_local((ptr), (old), (new));                     \
+       __res = arch_cmpxchg_local((ptr), (old), (new));                \
                                                                        \
        /*                                                              \
         * In the Loongson3 workaround case __cmpxchg_asm() already     \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 })
 
 #ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)                                     \
+#define arch_cmpxchg64_local(ptr, o, n)                                        \
   ({                                                                   \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg_local((ptr), (o), (n));                                 \
+       arch_cmpxchg_local((ptr), (o), (n));                            \
   })
 
-#define cmpxchg64(ptr, o, n)                                           \
+#define arch_cmpxchg64(ptr, o, n)                                      \
   ({                                                                   \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg((ptr), (o), (n));                                       \
+       arch_cmpxchg((ptr), (o), (n));                                  \
   })
 #else
 
 # include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 
 # ifdef CONFIG_SMP
 
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
        return ret;
 }
 
-#  define cmpxchg64(ptr, o, n) ({                                      \
+#  define arch_cmpxchg64(ptr, o, n) ({                                 \
        unsigned long long __old = (__typeof__(*(ptr)))(o);             \
        unsigned long long __new = (__typeof__(*(ptr)))(n);             \
        __typeof__(*(ptr)) __res;                                       \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 })
 
 # else /* !CONFIG_SMP */
-#  define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#  define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
 
index 89107deb03fcb9a364493f8acc5e874244ae1386..ac9c8cfb2ba9e3da7ba337d3044b76a43ce37acd 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
        do {
                old32 = load32;
                new32 = (load32 & ~mask) | (val << shift);
-               load32 = cmpxchg(ptr32, old32, new32);
+               load32 = arch_cmpxchg(ptr32, old32, new32);
        } while (load32 != old32);
 
        return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
                 */
                old32 = (load32 & ~mask) | (old << shift);
                new32 = (load32 & ~mask) | (new << shift);
-               load32 = cmpxchg(ptr32, old32, new32);
+               load32 = arch_cmpxchg(ptr32, old32, new32);
                if (load32 == old32)
                        return old;
        }