*/
        for (;;) {
                int val = atomic_read(&lock->val);
+               u8 old = 0;
 
                if (!(val & _Q_LOCKED_PENDING_MASK) &&
-                  (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
+                   try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
                        lockevent_inc(pv_lock_stealing);
                        return true;
                }
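
This first hunk sets the pattern for the whole conversion: try_cmpxchg_acquire()
takes a pointer to the expected value, returns a boolean, and on failure writes
the value it actually observed back through that pointer. That is also why old
is declared inside the loop body: a failed attempt leaves the observed value in
it, so each iteration must start again from 0. As a rough stand-alone
illustration of how the two primitives relate (C11 atomics rather than the
kernel API; the function names here are mine, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

/* cmpxchg()-style strong CAS that returns the value found at *ptr. */
static unsigned char cmpxchg_u8(_Atomic unsigned char *ptr,
                                unsigned char old, unsigned char new)
{
        atomic_compare_exchange_strong(ptr, &old, new);
        return old;     /* on failure, the CAS stored the observed value here */
}

/* try_cmpxchg()-style wrapper: boolean result, observed value written
 * back through *oldp on failure -- the same contract as the kernel's
 * generic fallback. */
static bool try_cmpxchg_u8(_Atomic unsigned char *ptr,
                           unsigned char *oldp, unsigned char new)
{
        unsigned char r = cmpxchg_u8(ptr, *oldp, new);

        if (r == *oldp)
                return true;
        *oldp = r;      /* let the caller see what was actually there */
        return false;
}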
        int hopcnt = 0;
 
        for_each_hash_entry(he, offset, hash) {
+               struct qspinlock *old = NULL;
                hopcnt++;
-               if (!cmpxchg(&he->lock, NULL, lock)) {
+               if (try_cmpxchg(&he->lock, &old, lock)) {
                        WRITE_ONCE(he->node, node);
                        lockevent_pv_hop(hopcnt);
                        return &he->lock;
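
pv_hash() gets the same treatment with a pointer-typed expected value: the
first CPU to move he->lock from NULL to lock owns that hash entry, and a loser
simply probes on. Here too, old lives inside the loop so that every probe
starts from NULL rather than from whatever pointer the previous failed attempt
observed. A hypothetical stand-alone version of this slot-claiming idiom (C11,
illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct slot {
        _Atomic(void *) owner;
};

/* The first caller to CAS owner from NULL to its own pointer wins the
 * slot; anyone else moves on to the next one, as the hash probe does. */
static bool claim_slot(struct slot *s, void *me)
{
        void *expected = NULL;  /* must be reset before every attempt */

        return atomic_compare_exchange_strong(&s->owner, &expected, me);
}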
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
        struct pv_node *pn = (struct pv_node *)node;
-
+       u8 old = vcpu_halted;
        /*
         * If the vCPU is indeed halted, advance its state to match that of
         * pv_wait_node(). If OTOH this fails, the vCPU was running and will
         * observe its next->locked value and advance itself. To guarantee
         * full ordering irrespective of the success or failure of the
         * cmpxchg(), a relaxed version with an explicit barrier is used.
         * The control dependency will order the reading of pn->state
         * before any subsequent writes.
         */
        smp_mb__before_atomic();
-       if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
-           != vcpu_halted)
+       if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
                return;
 
        /*
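
In pv_kick_node() the observed value is not reused on failure -- the function
just returns -- so this hunk is purely about collapsing the two-line
compare-and-test into one call. Note the type of the expected-value variable:
pn->state is declared u8 in struct pv_node, so old must be u8 as well for
try_cmpxchg()'s type checking to accept &old. A loose C11 analogue of the
barrier-then-relaxed-CAS shape used here (illustrative only, not a faithful
model of the kernel's memory-ordering guarantees):

#include <stdatomic.h>
#include <stdbool.h>

enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

static bool advance_if_halted(_Atomic unsigned char *state)
{
        unsigned char old = vcpu_halted;

        /* stand-in for smp_mb__before_atomic() in the kernel code */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_compare_exchange_strong_explicit(state, &old,
                        vcpu_hashed, memory_order_relaxed,
                        memory_order_relaxed);
}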
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-       u8 locked;
+       u8 locked = _Q_LOCKED_VAL;
 
        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
-       if (likely(locked == _Q_LOCKED_VAL))
+       if (try_cmpxchg_release(&lock->locked, &locked, 0))
                return;
 
        __pv_queued_spin_unlock_slowpath(lock, locked);
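
The unlock fast path is where the conversion pays off twice: the
cmpxchg_release()/compare pair becomes a single call, and when the CAS fails,
locked already holds the value that was observed (e.g. _Q_SLOW_VAL), which is
exactly what the slow path takes as its second argument -- no extra load
needed. A C11 sketch of the same shape (names and the slow-path prototype are
illustrative, not the kernel's):

#include <stdatomic.h>

#define Q_LOCKED_VAL    1       /* stand-in for the kernel's _Q_LOCKED_VAL */

/* hypothetical slow path, taking the value the fast path observed */
void unlock_slowpath(_Atomic unsigned char *l, unsigned char seen);

static void unlock_fastpath(_Atomic unsigned char *l)
{
        unsigned char locked = Q_LOCKED_VAL;

        /* release CAS: 1 -> 0 hands the lock over... */
        if (atomic_compare_exchange_strong_explicit(l, &locked, 0,
                        memory_order_release, memory_order_relaxed))
                return;

        /* ...and on failure, locked is the value we raced against */
        unlock_slowpath(l, locked);
}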