KVM: arm64: vgic: Don't acquire the lpi_list_lock in vgic_put_irq()
author: Oliver Upton <oliver.upton@linux.dev>
Wed, 21 Feb 2024 05:42:53 +0000 (05:42 +0000)
committer: Oliver Upton <oliver.upton@linux.dev>
Fri, 23 Feb 2024 21:46:02 +0000 (21:46 +0000)
The LPI xarray's xa_lock is sufficient for synchronizing writers when
freeing a given LPI. Furthermore, readers can only take a new reference
on an IRQ if its refcount was already nonzero.

Stop taking the lpi_list_lock unnecessarily and get rid of
__vgic_put_lpi_locked().

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240221054253.3848076-11-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic.c
arch/arm64/kvm/vgic/vgic.h

index dad6f0ee7c4927bf120ee5a6cba322fb02bfc876..f6025886071ca819c622ee5ed7ff2f8ba46b4254 100644 (file)
@@ -649,7 +649,7 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
         * was in the cache, and increment it on the new interrupt.
         */
        if (cte->irq)
-               __vgic_put_lpi_locked(kvm, cte->irq);
+               vgic_put_irq(kvm, cte->irq);
 
        /*
         * The irq refcount is guaranteed to be nonzero while holding the
@@ -686,7 +686,7 @@ void vgic_its_invalidate_cache(struct kvm *kvm)
                if (!cte->irq)
                        break;
 
-               __vgic_put_lpi_locked(kvm, cte->irq);
+               vgic_put_irq(kvm, cte->irq);
                cte->irq = NULL;
        }
 
index df9e1aa1956cab719feccce69899b34bffa8aea5..f963f410788af02ca456c2e78deb1cf1f967e8c4 100644 (file)
@@ -111,22 +111,6 @@ static void vgic_irq_release(struct kref *ref)
 {
 }
 
-/*
- * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
- */
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
-{
-       struct vgic_dist *dist = &kvm->arch.vgic;
-
-       if (!kref_put(&irq->refcount, vgic_irq_release))
-               return;
-
-       xa_erase(&dist->lpi_xa, irq->intid);
-       atomic_dec(&dist->lpi_count);
-
-       kfree_rcu(irq, rcu);
-}
-
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
@@ -135,9 +119,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
        if (irq->intid < VGIC_MIN_LPI)
                return;
 
-       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-       __vgic_put_lpi_locked(kvm, irq);
-       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+       if (!kref_put(&irq->refcount, vgic_irq_release))
+               return;
+
+       xa_lock_irqsave(&dist->lpi_xa, flags);
+       __xa_erase(&dist->lpi_xa, irq->intid);
+       xa_unlock_irqrestore(&dist->lpi_xa, flags);
+
+       atomic_dec(&dist->lpi_count);
+       kfree_rcu(irq, rcu);
 }
 
 void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
index f874b9932c5a6c22fe0b806d242e2ab6c0ecf9bb..0c2b82de8fa3c723279695c8641de02e7775065d 100644 (file)
@@ -180,7 +180,6 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len);
 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid);
-void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
 bool vgic_get_phys_line_level(struct vgic_irq *irq);
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);