{
struct vgic_dist *dist = &kvm->arch.vgic;
- INIT_LIST_HEAD(&dist->lpi_list_head);
INIT_LIST_HEAD(&dist->lpi_translation_cache);
raw_spin_lock_init(&dist->lpi_list_lock);
+ xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
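XA_FLAGS_LOCK_IRQ makes the xarray take its internal xa_lock with interrupts disabled, so entries can be stored or erased from contexts that already hold the irq-safe lpi_list_lock, while lookups stay lockless under RCU. A minimal sketch of those semantics, using illustrative names (demo_xa, demo_lookup) rather than anything from the patch:

#include <linux/xarray.h>

/* Updates take the internal xa_lock with IRQs disabled. */
static DEFINE_XARRAY_FLAGS(demo_xa, XA_FLAGS_LOCK_IRQ);

static void *demo_lookup(unsigned long intid)
{
        /* xa_load() is RCU-protected; no external locking needed. */
        return xa_load(&demo_xa, intid);
}

The next hunk is from vgic_add_lpi(): the new INTID's slot is reserved before any lock is taken, so the store later in the function cannot fail on memory allocation while atomic.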
+ ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+ if (ret) {
+         kfree(irq);
+         return ERR_PTR(ret);
+ }
- INIT_LIST_HEAD(&irq->lpi_list);
INIT_LIST_HEAD(&irq->ap_list);
raw_spin_lock_init(&irq->irq_lock);
/*
 * There could be a race with another vgic_add_lpi(), so we need to
 * check that we don't add a second list entry with the same LPI.
 */
- list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
-         if (oldirq->intid != intid)
-                 continue;
-
+ oldirq = xa_load(&dist->lpi_xa, intid);
+ if (oldirq) {
        /* Someone was faster with adding this LPI, lets use that. */
        kfree(irq);
        irq = oldirq;
        goto out_unlock;
}
- list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
+ ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+ if (ret) {
+         xa_release(&dist->lpi_xa, intid);
+         kfree(irq);
+         goto out_unlock;
+ }

dist->lpi_list_count++;
out_unlock:
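Taken together, vgic_add_lpi() now has a common optimistic-insertion shape: allocate and reserve outside the critical section, detect a concurrent winner with a plain lookup under the lock, and either adopt the winner's entry or publish our own. Below is a self-contained sketch of that shape with hypothetical names (struct demo_obj, demo_xa, demo_lock, demo_get_or_add); it mirrors the flow above but is not the kernel's code:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(demo_xa, XA_FLAGS_LOCK_IRQ);
static DEFINE_RAW_SPINLOCK(demo_lock);

struct demo_obj {
        unsigned long id;
        struct kref refcount;
};

static struct demo_obj *demo_get_or_add(unsigned long id)
{
        struct demo_obj *obj, *old;
        unsigned long flags;
        int ret;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);
        obj->id = id;
        kref_init(&obj->refcount);

        /* Reserve the slot now so the store below needs no allocation. */
        ret = xa_reserve_irq(&demo_xa, id, GFP_KERNEL);
        if (ret) {
                kfree(obj);
                return ERR_PTR(ret);
        }

        raw_spin_lock_irqsave(&demo_lock, flags);

        old = xa_load(&demo_xa, id);
        if (old) {
                /* Lost the race: drop our copy and adopt the winner's. */
                kfree(obj);
                obj = old;
                goto out;
        }

        /* GFP of 0: the reservation guarantees this cannot allocate. */
        ret = xa_err(xa_store(&demo_xa, id, obj, 0));
        if (ret) {
                xa_release(&demo_xa, id);
                kfree(obj);
                obj = ERR_PTR(ret);
        }
out:
        raw_spin_unlock_irqrestore(&demo_lock, flags);
        return obj;
}

Reserving first is what keeps the critical section atomic-safe; it also explains the xa_err(xa_store(..., 0)) form, since a pre-reserved slot never needs to allocate xarray nodes. The release path below is the mirror image.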
if (!kref_put(&irq->refcount, vgic_irq_release))
        return;

- list_del(&irq->lpi_list);
+ xa_erase(&dist->lpi_xa, irq->intid);
dist->lpi_list_count--;
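Removal is driven by the reference count: only the final kref_put() unpublishes the LPI, with xa_erase() keyed by the INTID standing in for list_del(). A hedged sketch of that teardown shape, reusing the hypothetical demo_* names from the sketch above (the empty release callback mirrors how the caller frees the object itself once it is out of the map):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/xarray.h>

extern struct xarray demo_xa;   /* assumed initialized elsewhere */

struct demo_obj {               /* same object as in the earlier sketch */
        unsigned long id;
        struct kref refcount;
};

static void demo_release(struct kref *ref)
{
        /* Intentionally empty: the caller frees after unpublishing. */
}

/* Caller must hold the lock that serializes updates to demo_xa. */
static void demo_put(struct demo_obj *obj)
{
        if (!kref_put(&obj->refcount, demo_release))
                return;

        /* Last reference gone: unpublish by key, then free. */
        xa_erase(&demo_xa, obj->id);
        kfree(obj);
}

Erasing by key rather than unlinking is what lets struct vgic_irq drop its lpi_list member in the header changes below.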
struct vgic_irq {
raw_spinlock_t irq_lock; /* Protects the content of the struct */
- struct list_head lpi_list; /* Used to link all LPIs together */
struct list_head ap_list;
struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
                        * SPIs and LPIs: The VCPU whose ap_list
                        * this is queued on.
                        */
- /* Protects the lpi_list and the count value below. */
+ /* Protects the lpi_xa and the count value below. */
raw_spinlock_t lpi_list_lock;
+ struct xarray lpi_xa;
- struct list_head lpi_list_head;
int lpi_list_count;
/* LPI translation cache */
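With lpi_list_head gone, any code that previously walked every LPI via list_for_each_entry() must iterate the xarray instead, and xa_for_each() is the natural replacement. A short sketch, assuming the demo_xa map from the earlier sketches:

#include <linux/printk.h>
#include <linux/xarray.h>

extern struct xarray demo_xa;   /* assumed populated elsewhere */

static void demo_walk_all(void)
{
        unsigned long intid;
        void *entry;

        /* Visits present entries in ascending index order, RCU-safely. */
        xa_for_each(&demo_xa, intid, entry)
                pr_info("LPI %lu is mapped\n", intid);
}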