locking/atomic: csky: move to ARCH_ATOMIC
author Mark Rutland <mark.rutland@arm.com>
Tue, 25 May 2021 14:02:15 +0000 (15:02 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 26 May 2021 11:20:51 +0000 (13:20 +0200)
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates csky to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Guo Ren <guoren@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-17-mark.rutland@arm.com
arch/csky/Kconfig
arch/csky/include/asm/cmpxchg.h

index 8de5b987edb9f04487e70a76dcc3454dbed43c45..3521f14bcd969e4a94c92b9e840c517c61c2465c 100644 (file)
@@ -2,6 +2,7 @@
 config CSKY
        def_bool y
        select ARCH_32BIT_OFF_T
+       select ARCH_ATOMIC
        select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
index dabc8e46ce7b400c484de3118a4c7b3c4d6320d2..d1bef11f8dc9710d2d36b47ec83f276bd6410656 100644 (file)
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
        __ret;                                                  \
 })
 
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
                (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
 
 #define __cmpxchg_relaxed(ptr, old, new, size)                 \
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
        __ret;                                                  \
 })
 
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
        (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 
-#define cmpxchg(ptr, o, n)                                     \
+#define arch_cmpxchg(ptr, o, n)                                \
 ({                                                             \
        __typeof__(*(ptr)) __ret;                               \
        __smp_release_fence();                                  \
-       __ret = cmpxchg_relaxed(ptr, o, n);                     \
+       __ret = arch_cmpxchg_relaxed(ptr, o, n);                \
        __smp_acquire_fence();                                  \
        __ret;                                                  \
 })