KVM: arm64: vgic-its: Scope translation cache invalidations to an ITS
author		Oliver Upton <oliver.upton@linux.dev>
		Mon, 22 Apr 2024 20:01:46 +0000 (20:01 +0000)
committer	Marc Zyngier <maz@kernel.org>
		Thu, 25 Apr 2024 12:19:55 +0000 (13:19 +0100)
As the current LPI translation cache is global, the corresponding
invalidation helpers are also globally-scoped. In anticipation of
constructing a translation cache per ITS, add a helper for scoped cache
invalidations.
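
In concrete terms (a condensed restatement of the first hunk in the diff
below), the helper's signature moves from the VM to an individual ITS:

	/* before: invalidation is keyed on the VM */
	void vgic_its_invalidate_cache(struct kvm *kvm);

	/*
	 * after: callers pass the ITS they are operating on; for now the
	 * body still flushes the VM-global cache (reached via
	 * its->dev->kvm), but the signature is ready for per-ITS caches.
	 */
	static void vgic_its_invalidate_cache(struct vgic_its *its);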

We still need to support global invalidations when LPIs are toggled on
a redistributor, as a property of the translation cache is that all
stored LPIs are known to be deliverable.
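
The global path is provided by a new wrapper (reproduced from the diff
below): it walks the VM's device list under RCU and invalidates the
cache of every ITS it finds.

	void vgic_its_invalidate_all_caches(struct kvm *kvm)
	{
		struct kvm_device *dev;
		struct vgic_its *its;

		rcu_read_lock();

		list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
			/* skip devices that are not an ITS */
			if (dev->ops != &kvm_arm_vgic_its_ops)
				continue;

			its = dev->private;
			vgic_its_invalidate_cache(its);
		}

		rcu_read_unlock();
	}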

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-8-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/kvm/vgic/vgic.h

diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 441134ad674e28a7cf378b1b2196235cfaf20074..2caa30bf20c75eeedc9c0b6873f46876141bb1f7 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -23,6 +23,8 @@
 #include "vgic.h"
 #include "vgic-mmio.h"
 
+static struct kvm_device_ops kvm_arm_vgic_its_ops;
+
 static int vgic_its_save_tables_v0(struct vgic_its *its);
 static int vgic_its_restore_tables_v0(struct vgic_its *its);
 static int vgic_its_commit_v0(struct vgic_its *its);
@@ -616,8 +618,9 @@ out:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 }
 
-void vgic_its_invalidate_cache(struct kvm *kvm)
+static void vgic_its_invalidate_cache(struct vgic_its *its)
 {
+       struct kvm *kvm = its->dev->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte;
        unsigned long flags;
@@ -639,6 +642,24 @@ void vgic_its_invalidate_cache(struct kvm *kvm)
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 }
 
+void vgic_its_invalidate_all_caches(struct kvm *kvm)
+{
+       struct kvm_device *dev;
+       struct vgic_its *its;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
+               if (dev->ops != &kvm_arm_vgic_its_ops)
+                       continue;
+
+               its = dev->private;
+               vgic_its_invalidate_cache(its);
+       }
+
+       rcu_read_unlock();
+}
+
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq)
 {
@@ -826,7 +847,7 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
                 * don't bother here since we clear the ITTE anyway and the
                 * pending state is a property of the ITTE struct.
                 */
-               vgic_its_invalidate_cache(kvm);
+               vgic_its_invalidate_cache(its);
 
                its_free_ite(kvm, ite);
                return 0;
@@ -863,7 +884,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
        ite->collection = collection;
        vcpu = collection_to_vcpu(kvm, collection);
 
-       vgic_its_invalidate_cache(kvm);
+       vgic_its_invalidate_cache(its);
 
        return update_affinity(ite->irq, vcpu);
 }
@@ -1110,7 +1131,8 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 }
 
 /* Requires the its_lock to be held. */
-static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
+static void vgic_its_free_device(struct kvm *kvm, struct vgic_its *its,
+                                struct its_device *device)
 {
        struct its_ite *ite, *temp;
 
@@ -1122,7 +1144,7 @@ static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
        list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
                its_free_ite(kvm, ite);
 
-       vgic_its_invalidate_cache(kvm);
+       vgic_its_invalidate_cache(its);
 
        list_del(&device->dev_list);
        kfree(device);
@@ -1134,7 +1156,7 @@ static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
        struct its_device *cur, *temp;
 
        list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
-               vgic_its_free_device(kvm, cur);
+               vgic_its_free_device(kvm, its, cur);
 }
 
 /* its lock must be held */
@@ -1193,7 +1215,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
         * by removing the mapping and re-establishing it.
         */
        if (device)
-               vgic_its_free_device(kvm, device);
+               vgic_its_free_device(kvm, its, device);
 
        /*
         * The spec does not say whether unmapping a not-mapped device
@@ -1224,7 +1246,7 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
 
        if (!valid) {
                vgic_its_free_collection(its, coll_id);
-               vgic_its_invalidate_cache(kvm);
+               vgic_its_invalidate_cache(its);
        } else {
                struct kvm_vcpu *vcpu;
 
@@ -1395,7 +1417,7 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                vgic_put_irq(kvm, irq);
        }
 
-       vgic_its_invalidate_cache(kvm);
+       vgic_its_invalidate_cache(its);
 
        return 0;
 }
@@ -1747,7 +1769,7 @@ static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
 
        its->enabled = !!(val & GITS_CTLR_ENABLE);
        if (!its->enabled)
-               vgic_its_invalidate_cache(kvm);
+               vgic_its_invalidate_cache(its);
 
        /*
         * Try to process any pending commands. This function bails out early
@@ -1880,7 +1902,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_translation_cache_entry *cte, *tmp;
 
-       vgic_its_invalidate_cache(kvm);
+       vgic_its_invalidate_all_caches(kvm);
 
        list_for_each_entry_safe(cte, tmp,
                                 &dist->lpi_translation_cache, entry) {
@@ -2372,7 +2394,7 @@ static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
 
        ret = vgic_its_restore_itt(its, dev);
        if (ret) {
-               vgic_its_free_device(its->dev->kvm, dev);
+               vgic_its_free_device(its->dev->kvm, its, dev);
                return ret;
        }
 
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index c15ee1df036a22b42d0059cfe546857450fb91d7..a3983a631b5ad1cafce9bb5ec91bcc19a20c250f 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -277,7 +277,7 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
                        return;
 
                vgic_flush_pending_lpis(vcpu);
-               vgic_its_invalidate_cache(vcpu->kvm);
+               vgic_its_invalidate_all_caches(vcpu->kvm);
                atomic_set_release(&vgic_cpu->ctlr, 0);
        } else {
                ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 060dfd96b41fed282b931e3eb750134c24728ed3..e5cda1eb4bcf4c6b383c47b54a220c5239e3ea79 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -337,7 +337,7 @@ struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
 int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
 void vgic_lpi_translation_cache_init(struct kvm *kvm);
 void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
-void vgic_its_invalidate_cache(struct kvm *kvm);
+void vgic_its_invalidate_all_caches(struct kvm *kvm);
 
 /* GICv4.1 MMIO interface */
 int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);