x86/percpu: Define raw_cpu_try_cmpxchg() and this_cpu_try_cmpxchg()
author    Uros Bizjak <ubizjak@gmail.com>
Wed, 30 Aug 2023 15:13:56 +0000 (17:13 +0200)
committer Ingo Molnar <mingo@kernel.org>
Fri, 15 Sep 2023 11:18:23 +0000 (13:18 +0200)
Define target-specific raw_cpu_try_cmpxchg_N() and
this_cpu_try_cmpxchg_N() macros. These definitions override
the generic fallback definitions and enable target-specific
optimized implementations.

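A try_cmpxchg() returns a boolean and, on failure, writes the current
value of the variable back through the old-value pointer, so a retry
loop neither re-reads the variable nor compares the result itself.
A usage sketch (the per-CPU variable and the update computed from it
are made up for illustration):

	static DEFINE_PER_CPU(unsigned int, pcpu_var);

	unsigned int old, new;

	old = this_cpu_read(pcpu_var);
	do {
		new = old + 1;	/* any update computed from 'old' */
	} while (!this_cpu_try_cmpxchg(pcpu_var, &old, new));
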
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230830151623.3900-1-ubizjak@gmail.com
arch/x86/include/asm/percpu.h

index 4c3641927f397cc5f26c7db462c4d487f7a5a6ea..a87db6140fe2ae8774b951368c653afc682b22b3 100644 (file)
@@ -210,6 +210,25 @@ do {                                                                       \
        (typeof(_var))(unsigned long) pco_old__;                        \
 })
 
+#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)         \
+({                                                                     \
+       bool success;                                                   \
+       __pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
+       __pcpu_type_##size pco_old__ = *pco_oval__;                     \
+       __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);       \
+       asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",               \
+                                   __percpu_arg([var]))                \
+                 CC_SET(z)                                             \
+                 : CC_OUT(z) (success),                                \
+                   [oval] "+a" (pco_old__),                            \
+                   [var] "+m" (_var)                                   \
+                 : [nval] __pcpu_reg_##size(, pco_new__)               \
+                 : "memory");                                          \
+       if (unlikely(!success))                                         \
+               *pco_oval__ = pco_old__;                                \
+       likely(success);                                                \
+})
+
 #if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
 #define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)            \
 ({                                                                     \
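The new macro is the flags-based counterpart of percpu_cmpxchg_op():
cmpxchg leaves the old memory value in the accumulator register, so on
failure that value is stored back through _ovalp, and the ZF output
wired up via CC_SET(z)/CC_OUT(z) is returned directly instead of making
the caller compare the returned value. The caller-visible behaviour,
sketched as plain C and ignoring the %gs-based per-CPU addressing and
the fact that the real operation is a single instruction (hypothetical
helper, not part of the patch):

	static inline bool try_cmpxchg_sketch(unsigned int *pcp,
					      unsigned int *ovalp,
					      unsigned int nval)
	{
		if (*pcp == *ovalp) {	/* cmpxchg sets ZF on match...   */
			*pcp = nval;	/* ...and stores the new value   */
			return true;
		}
		*ovalp = *pcp;		/* on mismatch, cmpxchg returns  */
		return false;		/* the current value in the acc. */
	}
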
@@ -410,6 +429,9 @@ do {                                                                        \
 #define raw_cpu_cmpxchg_1(pcp, oval, nval)     percpu_cmpxchg_op(1, , pcp, oval, nval)
 #define raw_cpu_cmpxchg_2(pcp, oval, nval)     percpu_cmpxchg_op(2, , pcp, oval, nval)
 #define raw_cpu_cmpxchg_4(pcp, oval, nval)     percpu_cmpxchg_op(4, , pcp, oval, nval)
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)        percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)        percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)        percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)
 
 #define this_cpu_add_return_1(pcp, val)                percpu_add_return_op(1, volatile, pcp, val)
 #define this_cpu_add_return_2(pcp, val)                percpu_add_return_op(2, volatile, pcp, val)
@@ -417,6 +439,9 @@ do {                                                                        \
 #define this_cpu_cmpxchg_1(pcp, oval, nval)    percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
 #define this_cpu_cmpxchg_2(pcp, oval, nval)    percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)    percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)       percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)       percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)       percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)
 
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
@@ -431,6 +456,7 @@ do {                                                                        \
 #define raw_cpu_add_return_8(pcp, val)         percpu_add_return_op(8, , pcp, val)
 #define raw_cpu_xchg_8(pcp, nval)              raw_percpu_xchg_op(pcp, nval)
 #define raw_cpu_cmpxchg_8(pcp, oval, nval)     percpu_cmpxchg_op(8, , pcp, oval, nval)
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)        percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)
 
 #define this_cpu_read_8(pcp)                   percpu_from_op(8, volatile, "mov", pcp)
 #define this_cpu_write_8(pcp, val)             percpu_to_op(8, volatile, "mov", (pcp), val)
@@ -440,6 +466,7 @@ do {                                                                        \
 #define this_cpu_add_return_8(pcp, val)                percpu_add_return_op(8, volatile, pcp, val)
 #define this_cpu_xchg_8(pcp, nval)             percpu_xchg_op(8, volatile, pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)    percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)       percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
 #endif
 
 static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,