From: Joao Martins
Date: Tue, 30 May 2023 14:11:33 +0000 (-0400)
Subject: iommu/amd: Switch amd_iommu_update_ga() to use modify_irte_ga()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=a42f0c7a4118ffa395866cc7f5522d71a86fc4dd;p=linux.git

modify_irte_ga() uses cmpxchg_double() to update the IRTE in one shot,
which is necessary when adding IRTE cache disabling support, since the
driver will no longer need to flush the IRT for the update to take
effect in hardware.

Note that there is a functional change: the IsRun and Destination bits
of the IRTE are now cached in struct amd_ir_data.entry.

Reviewed-by: Jerry Snitselaar
Signed-off-by: Joao Martins
Signed-off-by: Suravee Suthikulpanit
Link: https://lore.kernel.org/r/20230530141137.14376-2-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 4a314647d1f79..12d6844128f99 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3702,44 +3702,26 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
 int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 {
-	unsigned long flags;
-	struct amd_iommu *iommu;
-	struct irq_remap_table *table;
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-	int devid = ir_data->irq_2_irte.devid;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
 
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+	    !entry || !entry->lo.fields_vapic.guest_mode)
 		return 0;
 
-	iommu = ir_data->iommu;
-	if (!iommu)
-		return -ENODEV;
-
-	table = get_irq_table(iommu, devid);
-	if (!table)
+	if (!ir_data->iommu)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&table->lock, flags);
-
-	if (ref->lo.fields_vapic.guest_mode) {
-		if (cpu >= 0) {
-			ref->lo.fields_vapic.destination =
-					APICID_TO_IRTE_DEST_LO(cpu);
-			ref->hi.fields.destination =
-					APICID_TO_IRTE_DEST_HI(cpu);
-		}
-		ref->lo.fields_vapic.is_run = is_run;
-		barrier();
+	if (cpu >= 0) {
+		entry->lo.fields_vapic.destination =
+					APICID_TO_IRTE_DEST_LO(cpu);
+		entry->hi.fields.destination =
+					APICID_TO_IRTE_DEST_HI(cpu);
 	}
+	entry->lo.fields_vapic.is_run = is_run;
 
-	raw_spin_unlock_irqrestore(&table->lock, flags);
-
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
-	return 0;
+	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+			      ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
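
For readers unfamiliar with the mechanism the message refers to, here is a
minimal userspace sketch of what "update the IRTE in one shot" means: the
128-bit entry (two 64-bit halves) is replaced with a single atomic
compare-and-exchange, so the IOMMU can never observe a torn, half-written
entry. The struct layout, field comments and helper name below are
illustrative assumptions, not the driver's actual definitions; in the kernel
this happens inside modify_irte_ga() via cmpxchg_double().

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/* Illustrative 128-bit IRTE: two 64-bit halves, as used in guest (GA) mode. */
	struct irte128 {
		uint64_t lo;	/* e.g. guest_mode, is_run, low destination bits */
		uint64_t hi;	/* e.g. vector, high destination bits */
	} __attribute__((aligned(16)));

	/*
	 * Publish a new IRTE value only if the entry still holds the expected
	 * old value.  Both halves change together or not at all.
	 * (Compile with -mcx16 on x86-64 so the compiler can emit cmpxchg16b.)
	 */
	static bool irte_update_one_shot(struct irte128 *irte,
					 struct irte128 old, struct irte128 new)
	{
		unsigned __int128 expected, desired;

		memcpy(&expected, &old, sizeof(expected));
		memcpy(&desired, &new, sizeof(desired));

		return __atomic_compare_exchange_n((unsigned __int128 *)irte,
						   &expected, desired, false,
						   __ATOMIC_SEQ_CST,
						   __ATOMIC_SEQ_CST);
	}

Because the whole entry is published atomically, the Destination and IsRun
bits can be tracked in amd_ir_data.entry and pushed out through
modify_irte_ga(), instead of being rewritten in place under the remap-table
lock as the removed code did.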