The last genuine use case for the lpi_list_lock was the global LPI
translation cache, which has been removed in favor of a per-ITS xarray.
Remove a layer from the locking puzzle by getting rid of it.
vgic_add_lpi() still has a critical section that needs to protect
against the insertion of other LPIs; change it to take the LPI xarray's
xa_lock to retain this property.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-13-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
 
-       raw_spin_lock_init(&dist->lpi_list_lock);
        xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
 }
 
 
        irq->target_vcpu = vcpu;
        irq->group = 1;
 
-       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+       xa_lock_irqsave(&dist->lpi_xa, flags);
 
        /*
         * There could be a race with another vgic_add_lpi(), so we need to
                goto out_unlock;
        }
 
-       ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+       ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
        if (ret) {
-               xa_release(&dist->lpi_xa, intid);
+               /*
+                * xa_release() is xa_cmpxchg() under the hood and takes
+                * xa_lock itself; now that this path runs with the xa_lock
+                * held (xa_lock_irqsave() above), use the lock-already-held
+                * variant to drop the reserved entry without self-deadlock.
+                */
+               __xa_erase(&dist->lpi_xa, intid);
                kfree(irq);
        }
 
 out_unlock:
-       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+       xa_unlock_irqrestore(&dist->lpi_xa, flags);
 
        if (ret)
                return ERR_PTR(ret);
 
  *       its->cmd_lock (mutex)
  *         its->its_lock (mutex)
  *           vgic_cpu->ap_list_lock            must be taken with IRQs disabled
- *             kvm->lpi_list_lock              must be taken with IRQs disabled
- *               vgic_dist->lpi_xa.xa_lock     must be taken with IRQs disabled
- *                 vgic_irq->irq_lock          must be taken with IRQs disabled
+ *             vgic_dist->lpi_xa.xa_lock       must be taken with IRQs disabled
+ *               vgic_irq->irq_lock            must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower
 
         */
        u64                     propbaser;
 
-       /* Protects the lpi_list. */
-       raw_spinlock_t          lpi_list_lock;
-
 #define LPI_XA_MARK_DEBUG_ITER XA_MARK_0
        struct xarray           lpi_xa;