locking/pvqspinlock/x86: Remove redundant CMP after CMPXCHG in __raw_callee_save___pv...
Author:     Uros Bizjak <ubizjak@gmail.com>
AuthorDate: Fri, 12 Apr 2024 08:38:53 +0000 (10:38 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 12 Apr 2024 09:42:39 +0000 (11:42 +0200)
The x86 CMPXCHG instruction reports success in the ZF flag. Remove the
redundant CMP instruction after CMPXCHG that performs the same check.
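
For reference, a minimal sketch of the idiom this relies on, using
GCC's x86 flag-output constraints (GCC 6+); this is a hypothetical
standalone helper, not the kernel's try_cmpxchg() implementation:

  #include <stdbool.h>
  #include <stdint.h>

  /* Atomically store 'new' to *ptr if *ptr == *old.  Success is
   * returned straight from CMPXCHG's ZF via the "=@ccz" flag output,
   * so no separate CMP against the old value in AL is needed.  On
   * failure, CMPXCHG leaves the observed value in AL, updating *old. */
  static inline bool try_cmpxchg_u8(uint8_t *ptr, uint8_t *old, uint8_t new)
  {
          bool success;

          asm volatile("lock cmpxchgb %3, %1"
                       : "=@ccz" (success), "+m" (*ptr), "+a" (*old)
                       : "q" (new)
                       : "memory");
          return success;
  }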

Also update the function comment to show the modern try_cmpxchg()
version of the equivalent C code.
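
A sketch of how the updated comment's fast path reads with such a
helper (hypothetical helper name from the sketch above; _Q_LOCKED_VAL
is 1):

  uint8_t lockval = 1;                    /* _Q_LOCKED_VAL */

  /* Single CMPXCHG: ZF decides the branch, and on failure 'lockval'
   * already holds the observed lock value for the slow path. */
  if (try_cmpxchg_u8(&lock->locked, &lockval, 0))
          return;
  pv_queued_spin_unlock_slowpath(lock, lockval);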

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20240412083908.282802-1-ubizjak@gmail.com
arch/x86/include/asm/qspinlock_paravirt.h

index ef9697f20129af1bcb54bc75dc37095d4837e6b8..466af57b8ed6aa1b1eef890d0b38b91cd5712781 100644 (file)
@@ -25,9 +25,9 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
  *
  * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
  * {
- *     u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
+ *     u8 lockval = _Q_LOCKED_VAL;
  *
- *     if (likely(lockval == _Q_LOCKED_VAL))
+ *     if (try_cmpxchg(&lock->locked, &lockval, 0))
  *             return;
  *     pv_queued_spin_unlock_slowpath(lock, lockval);
  * }
@@ -43,7 +43,6 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
        "mov   $0x1,%eax\n\t"                                           \
        "xor   %edx,%edx\n\t"                                           \
        LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"                            \
-       "cmp   $0x1,%al\n\t"                                            \
        "jne   .slowpath\n\t"                                           \
        "pop   %rdx\n\t"                                                \
        FRAME_END                                                       \