KVM: arm64: vgic-its: Walk LPI xarray in its_sync_lpi_pending_table()
Author: Oliver Upton <oliver.upton@linux.dev>
Mon, 22 Apr 2024 20:01:41 +0000 (20:01 +0000)
Committer: Marc Zyngier <maz@kernel.org>
Thu, 25 Apr 2024 12:19:55 +0000 (13:19 +0100)
The new LPI xarray makes it possible to walk the VM's LPIs without
holding a lock, meaning that vgic_copy_lpi_list() is no longer
necessary. Prepare for the deletion by walking the LPI xarray directly
in its_sync_lpi_pending_table().

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-3-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/vgic/vgic-its.c

index e85a495ada9c193aa75ac4e2d404070d6390c758..bdb7718b923a2b021b72d6ffcd0595c97e06a68a 100644 (file)
@@ -446,23 +446,18 @@ static u32 max_lpis_propbaser(u64 propbaser)
 static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 {
        gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       unsigned long intid, flags;
        struct vgic_irq *irq;
        int last_byte_offset = -1;
        int ret = 0;
-       u32 *intids;
-       int nr_irqs, i;
-       unsigned long flags;
        u8 pendmask;
 
-       nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
-       if (nr_irqs < 0)
-               return nr_irqs;
-
-       for (i = 0; i < nr_irqs; i++) {
+       xa_for_each(&dist->lpi_xa, intid, irq) {
                int byte_offset, bit_nr;
 
-               byte_offset = intids[i] / BITS_PER_BYTE;
-               bit_nr = intids[i] % BITS_PER_BYTE;
+               byte_offset = intid / BITS_PER_BYTE;
+               bit_nr = intid % BITS_PER_BYTE;
 
                /*
                 * For contiguously allocated LPIs chances are we just read
@@ -472,25 +467,23 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
                        ret = kvm_read_guest_lock(vcpu->kvm,
                                                  pendbase + byte_offset,
                                                  &pendmask, 1);
-                       if (ret) {
-                               kfree(intids);
+                       if (ret)
                                return ret;
-                       }
+
                        last_byte_offset = byte_offset;
                }
 
-               irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+               irq = vgic_get_irq(vcpu->kvm, NULL, intid);
                if (!irq)
                        continue;
 
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
-               irq->pending_latch = pendmask & (1U << bit_nr);
+               if (irq->target_vcpu == vcpu)
+                       irq->pending_latch = pendmask & (1U << bit_nr);
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
-       kfree(intids);
-
        return ret;
 }