riscv: atomic: Add custom conditional atomic operation implementation
author	Guo Ren <guoren@linux.alibaba.com>
Thu, 5 May 2022 03:55:24 +0000 (11:55 +0800)
committer	Palmer Dabbelt <palmer@rivosinc.com>
Sat, 21 May 2022 17:31:47 +0000 (10:31 -0700)
Add custom implementations of the following conditional atomic
operations (similar to the existing dec_if_positive):
 - arch_atomic_inc_unless_negative
 - arch_atomic_dec_unless_positive
 - arch_atomic64_inc_unless_negative
 - arch_atomic64_dec_unless_positive

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Link: https://lore.kernel.org/r/20220505035526.2974382-4-guoren@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
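
These arch_* hooks override the generic cmpxchg-based fallbacks in
<linux/atomic.h>; callers keep using the ordinary atomic_*() wrappers.
A minimal usage sketch (the object and field names here are hypothetical,
not taken from this patch):

#include <linux/atomic.h>

/* Hypothetical object whose refcount is driven negative once it is dead. */
struct obj {
	atomic_t refcnt;
};

/*
 * Take a reference only while the object is still live: increments refcnt
 * and returns true if it was >= 0, returns false once it has gone negative.
 * With this patch, the increment on RISC-V is the LR/SC loop below rather
 * than the generic cmpxchg fallback.
 */
static bool obj_tryget(struct obj *obj)
{
	return atomic_inc_unless_negative(&obj->refcnt);
}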
arch/riscv/include/asm/atomic.h

index f3c6a6eac02a58715fddafbd0688c54f8f834ad4..0dfe9d857a762b6be9a5089b0a5a154a47f46d6a 100644
@@ -310,6 +310,46 @@ ATOMIC_OPS()
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
 
+static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
+{
+       int prev, rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.w      %[p],  %[c]\n"
+               "       bltz      %[p],  1f\n"
+               "       addi      %[rc], %[p], 1\n"
+               "       sc.w.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev < 0);
+}
+
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
+{
+       int prev, rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.w      %[p],  %[c]\n"
+               "       bgtz      %[p],  1f\n"
+               "       addi      %[rc], %[p], -1\n"
+               "       sc.w.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev > 0);
+}
+
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
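/*
 * For readers less familiar with LR/SC: the two loops above behave like the
 * following portable compare-and-swap sketch.  This is only an illustration
 * (compiler __atomic builtins instead of kernel primitives, and the ordering
 * is approximated with SEQ_CST rather than the sc.w.rl + fence sequence);
 * the 64-bit variants further down follow the same pattern on 64-bit values.
 */
#include <stdbool.h>

static inline bool inc_unless_negative_sketch(int *counter)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

	while (old >= 0) {
		/* On failure, 'old' is reloaded with the current value. */
		if (__atomic_compare_exchange_n(counter, &old, old + 1,
						false, __ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			return true;
	}
	return false;
}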
 static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 {
        int prev, rc;
@@ -331,6 +371,48 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
 
 #ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+       s64 prev;
+       long rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.d      %[p],  %[c]\n"
+               "       bltz      %[p],  1f\n"
+               "       addi      %[rc], %[p], 1\n"
+               "       sc.d.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev < 0);
+}
+
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+
+static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+       s64 prev;
+       long rc;
+
+       __asm__ __volatile__ (
+               "0:     lr.d      %[p],  %[c]\n"
+               "       bgtz      %[p],  1f\n"
+               "       addi      %[rc], %[p], -1\n"
+               "       sc.d.rl   %[rc], %[rc], %[c]\n"
+               "       bnez      %[rc], 0b\n"
+               "       fence     rw, rw\n"
+               "1:\n"
+               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+               :
+               : "memory");
+       return !(prev > 0);
+}
+
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+
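/*
 * Note on ordering (annotation, not part of the patch): in all four loops,
 * the release on sc.w.rl/sc.d.rl plus the trailing "fence rw, rw" make a
 * successful operation fully ordered, matching the existing dec_if_positive
 * sequence below.  When the early branch to label 1 is taken (the value is
 * already negative resp. positive), the function returns false without
 * executing the fence; a failed conditional atomic does not need to provide
 * ordering.
 */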
 static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        s64 prev;