vfio: Introduce interface to flush virqfd inject workqueue
author: Alex Williamson <alex.williamson@redhat.com>
Fri, 8 Mar 2024 23:05:24 +0000 (16:05 -0700)
committer: Alex Williamson <alex.williamson@redhat.com>
Mon, 11 Mar 2024 19:08:52 +0000 (13:08 -0600)
In order to synchronize changes that can affect the thread callback,
introduce an interface to force a flush of the inject workqueue.  The
irqfd pointer is only valid under spinlock, but the workqueue cannot
be flushed under spinlock.  Therefore the flush work for the irqfd is
queued under spinlock.  The vfio_irqfd_cleanup_wq workqueue is re-used
for queuing this work such that flushing the workqueue is also ordered
relative to shutdown.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20240308230557.805580-4-alex.williamson@redhat.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
drivers/vfio/virqfd.c
include/linux/vfio.h

index 29c564b7a6e13e40c690ce3894f7873ca55bb240..53226913380197e38e021bb2efa7fc7979c323c7 100644 (file)
@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_struct *work)
                virqfd->thread(virqfd->opaque, virqfd->data);
 }
 
+static void virqfd_flush_inject(struct work_struct *work)
+{
+       struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
+
+       flush_work(&virqfd->inject);
+}
+
 int vfio_virqfd_enable(void *opaque,
                       int (*handler)(void *, void *),
                       void (*thread)(void *, void *),
@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque,
 
        INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
        INIT_WORK(&virqfd->inject, virqfd_inject);
+       INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
 
        irqfd = fdget(fd);
        if (!irqfd.file) {
@@ -213,3 +221,16 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd)
        flush_workqueue(vfio_irqfd_cleanup_wq);
 }
 EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+
+void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&virqfd_lock, flags);
+       if (*pvirqfd && (*pvirqfd)->thread)
+               queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
+       spin_unlock_irqrestore(&virqfd_lock, flags);
+
+       flush_workqueue(vfio_irqfd_cleanup_wq);
+}
+EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
index 89b265bc6ec315bcadadebcc92b5ea4ab283822f..8b1a2982040914052b423c73d7871ed71b563a09 100644 (file)
@@ -356,6 +356,7 @@ struct virqfd {
        wait_queue_entry_t              wait;
        poll_table              pt;
        struct work_struct      shutdown;
+       struct work_struct      flush_inject;
        struct virqfd           **pvirqfd;
 };
 
@@ -363,5 +364,6 @@ int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
                       void (*thread)(void *, void *), void *data,
                       struct virqfd **pvirqfd, int fd);
 void vfio_virqfd_disable(struct virqfd **pvirqfd);
+void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
 
 #endif /* VFIO_H */