}
 __setup("spin_retry=", spin_retry_setup);
 
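+/*
+ * Load the lock value with a "niai 4" access intent hint. On machines
+ * with the zEC12 facilities this tells the CPU that the lock word is
+ * only inspected, so the cache line does not have to be fetched
+ * exclusively. The instruction is emitted as .long 0xb2fa0040 to
+ * avoid depending on assembler support for the mnemonic.
+ */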
+static inline int arch_load_niai4(int *lock)
+{
+       int owner;
+
+       asm volatile(
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+               "       .long   0xb2fa0040\n"   /* NIAI 4 */
+#endif
+               "       l       %0,%1\n"
+               : "=d" (owner) : "Q" (*lock) : "memory");
+       return owner;
+}
+
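+/*
+ * Try to take the lock with compare-and-swap, preceded by a "niai 8"
+ * hint that the cache line is about to be modified. Returns nonzero
+ * if the lock word still contained 'old' and has been replaced by
+ * 'new'.
+ */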
+static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
+{
+       int expected = old;
+
+       asm volatile(
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+               "       .long   0xb2fa0080\n"   /* NIAI 8 */
+#endif
+               "       cs      %0,%3,%1\n"
+               : "=d" (old), "=Q" (*lock)
+               : "0" (old), "d" (new), "Q" (*lock)
+               : "cc", "memory");
+       return expected == old;
+}
+
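+/*
+ * Slow path of arch_spin_lock: yield to a preempted lock holder up
+ * front, then spin on the lock word with read-only loads and retry
+ * the compare-and-swap whenever the lock looks free. After spin_retry
+ * unsuccessful rounds the CPU yields to the lock holder again.
+ */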
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
        int cpu = SPINLOCK_LOCKVAL;
-       int owner, count, first_diag;
+       int owner, count;
+
+       /* Pass the virtual CPU to the lock holder if it is not running */
+       owner = arch_load_niai4(&lp->lock);
+       if (owner && arch_vcpu_is_preempted(~owner))
+               smp_yield_cpu(~owner);
 
-       first_diag = 1;
+       count = spin_retry;
        while (1) {
-               owner = ACCESS_ONCE(lp->lock);
+               owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
-                       if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+                       if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
-               /* First iteration: check if the lock owner is running. */
-               if (first_diag && arch_vcpu_is_preempted(~owner)) {
-                       smp_yield_cpu(~owner);
-                       first_diag = 0;
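+               /* Loop for a while on the lock value. */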
+               if (count-- >= 0)
                        continue;
-               }
-               /* Loop for a while on the lock value. */
                count = spin_retry;
-               do {
-                       owner = ACCESS_ONCE(lp->lock);
-               } while (owner && count-- > 0);
-               if (!owner)
-                       continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
-               if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
+               if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
-                       first_diag = 0;
-               }
        }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
 
 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
        int cpu = SPINLOCK_LOCKVAL;
-       int owner, count, first_diag;
+       int owner, count;
 
        local_irq_restore(flags);
-       first_diag = 1;
+
+       /* Pass the virtual CPU to the lock holder if it is not running */
+       owner = arch_load_niai4(&lp->lock);
+       if (owner && arch_vcpu_is_preempted(~owner))
+               smp_yield_cpu(~owner);
+
+       count = spin_retry;
        while (1) {
-               owner = ACCESS_ONCE(lp->lock);
+               owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
-                       if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+                       if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
-               /* Check if the lock owner is running. */
-               if (first_diag && arch_vcpu_is_preempted(~owner)) {
-                       smp_yield_cpu(~owner);
-                       first_diag = 0;
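+               /* Loop for a while on the lock value. */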
+               if (count-- >= 0)
                        continue;
-               }
-               /* Loop for a while on the lock value. */
                count = spin_retry;
-               do {
-                       owner = ACCESS_ONCE(lp->lock);
-               } while (owner && count-- > 0);
-               if (!owner)
-                       continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
-               if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
+               if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
-                       first_diag = 0;
-               }
        }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);