atomics/treewide: Make atomic64_inc_not_zero() optional
author		Mark Rutland <mark.rutland@arm.com>
		Thu, 21 Jun 2018 12:13:08 +0000 (13:13 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Thu, 21 Jun 2018 12:22:33 +0000 (14:22 +0200)
We define a trivial fallback for atomic_inc_not_zero(), but don't do
the same for atomic64_inc_not_zero(), leading most architectures to
define the same boilerplate.
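
For comparison, the existing 32-bit fallback in <linux/atomic.h> has this
shape (a sketch; the guard is assumed to use the same #ifndef convention
as the one added by this patch):

  #ifndef atomic_inc_not_zero
  #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
  #endif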

Let's add a fallback in <linux/atomic.h>, and remove the redundant
implementations. Note that atomic64_add_unless() is always defined in
<linux/atomic.h>, and promotes its arguments to the requisite types, so
we need not do this explicitly.
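
Concretely, the fallback added at the end of this patch is:

  #ifndef atomic64_inc_not_zero
  #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
  #endif

Since atomic64_add_unless() takes long long arguments, the plain int
constants 1 and 0 are promoted at the call site, so the 1LL/0LL spellings
carried by some of the per-arch copies below are unnecessary here.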

There should be no functional change as a result of this patch.
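
For reference, architectures that keep a private implementation (powerpc,
32-bit x86) opt out by defining the symbol to itself, as in the powerpc
hunk below:

  #define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

which makes the #ifndef guard in <linux/atomic.h> skip the generic
definition.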

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-6-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
16 files changed:
arch/alpha/include/asm/atomic.h
arch/arc/include/asm/atomic.h
arch/arm/include/asm/atomic.h
arch/arm64/include/asm/atomic.h
arch/ia64/include/asm/atomic.h
arch/mips/include/asm/atomic.h
arch/parisc/include/asm/atomic.h
arch/powerpc/include/asm/atomic.h
arch/riscv/include/asm/atomic.h
arch/s390/include/asm/atomic.h
arch/sparc/include/asm/atomic_64.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
include/asm-generic/atomic-instrumented.h
include/asm-generic/atomic64.h
include/linux/atomic.h

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 392b15a4dd4fe6c62943615e1bce6b5bdb2650f5..eb0f25e4c5ddbde21a6e8d6aac6f1acbf925f3a2 100644
@@ -296,8 +296,6 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        return old - 1;
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index cecdf3403cafaeb35d5d5aeb13557a20c2a1186b..1406825b5e7d2d86784dcda005458d85cd9b721b 100644
@@ -603,7 +603,6 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
 #define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9d56d0727c9bf95deb444f96d1285155eea3cd0e..02f3894faa48bdfe2eae52ae0feddbdc7d962d5a 100644
@@ -534,7 +534,6 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
 #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 264d20339f74bc3761171b72263454d5a9fd2bcb..ad50412889c5198b08e0b945d55cd8bf8090ccb4 100644
 #define atomic64_add_unless(v, a, u)   (___atomic_add_unless(v, a, u, 64) != u)
 #define atomic64_andnot                        atomic64_andnot
 
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
-
 #endif
 #endif
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 9d2ddde5f9d530d9255d631990104d6e8e45cf64..93d48b8232200db0c77f4194ebe6be0978bf4e33 100644
@@ -246,8 +246,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
        return c != (u);
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 {
        long c, old, dec;
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 02fc1553cf9b24af534dbf5ec6f3d2bde184f30b..502e691c63937ab63227ec29d07f70e1f3dd5050 100644
@@ -644,8 +644,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
        return c != (u);
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
 
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 7748abced766c4bdd4bfb55ff53eb7a9e95b8a4b..3fd0243bf40526dbf13cd2094879a0236df5d04f 100644
@@ -305,8 +305,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
        return c != (u);
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 /*
  * atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 1483261080a1fdc007e708acd620325cf252d0c3..e59620ee4f6b5c6498e92ca0a1f0467b77ead5a7 100644
@@ -582,6 +582,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 
        return t1 != 0;
 }
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
 
 #endif /* __powerpc64__ */
 
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 0e27e050ba149bd1913d3ac0294afedc31fa93cc..18259e90f57e8f405bd035b830f1291edf7a9afb 100644
@@ -375,13 +375,6 @@ static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 #endif
 
-#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
-{
-        return atomic64_add_unless(v, 1, 0);
-}
-#endif
-
 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
  * {cmp,}xchg and the operations that return, so they need a full barrier.
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c2858cdd8c299bff36f4d7b1c26d2b7c6682541d..66dac30a4fe1e8bb41b01d30c54a15895360bfef 100644
@@ -212,6 +212,5 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_dec(_v)               atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)                atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #endif /* __ARCH_S390_ATOMIC__  */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f416fd3d270854f595ec6f9cb01ce714ae3337dc..07830a316464aa7269f9850ad4fb44528908415f 100644
@@ -123,8 +123,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
        return c != (u);
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
 long atomic64_dec_if_positive(atomic64_t *v);
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 92212bf0484fecdd3922fefd3ab16c71e17b476f..2a33cc17801b1d47be06580fc1a3f51f80ac7782 100644
@@ -295,7 +295,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
        return (int)a;
 }
 
-
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 6106b59d326066f869cc105939c7fb18dfa51310..6f95023894b7e9f163642386105c50026d4da45f 100644
@@ -207,8 +207,6 @@ static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
        return true;
 }
 
-#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
-
 /*
  * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 497faa4a05e35af9aeb62608072c180427077467..83bb88d791c43fbc4dd2a25c3d96fa9c020bd09b 100644
@@ -205,11 +205,14 @@ static __always_inline s64 atomic64_dec_return(atomic64_t *v)
        return arch_atomic64_dec_return(v);
 }
 
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
 static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_inc_not_zero(v);
 }
+#endif
 
 static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index a951a721e1bbd5c0eb8db3edf3ad4f0fbab451a7..5105275ac8258a9c1b207cd4960e576ea9dce49e 100644
@@ -63,6 +63,5 @@ extern bool    atomic64_add_unless(atomic64_t *v, long long a, long long u);
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
 #define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
 
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 307a7f6d619a93a66d76663097b8ced2f12662a5..ae3f30923d05b31e9999c31d093018b6d78ee7a7 100644
@@ -1019,6 +1019,17 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define atomic64_try_cmpxchg_release   atomic64_try_cmpxchg
 #endif /* atomic64_try_cmpxchg */
 
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
+#endif
+
 #ifndef atomic64_andnot
 static inline void atomic64_andnot(long long i, atomic64_t *v)
 {