scsi: fnic: Move fnic_flush_tx() to a work queue
authorLee Duncan <lduncan@suse.com>
Fri, 9 Feb 2024 18:07:35 +0000 (10:07 -0800)
committerMartin K. Petersen <martin.petersen@oracle.com>
Tue, 13 Feb 2024 01:50:07 +0000 (20:50 -0500)
Rather than calling 'fnic_flush_tx()' from interrupt context, move it
onto a work queue to avoid any locking issues.

Fixes: 1a1975551943 ("scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock")
Co-developed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Lee Duncan <lduncan@suse.com>
Link: https://lore.kernel.org/r/ce5ffa5d0ff82c2b2e283b3b4bff23291d49b05c.1707500786.git.lduncan@suse.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c

index 2074937c05bc855dea5a580079b84fd677460fb5..ce73f08ee889f1409c43583d959baeae6f0fb895 100644 (file)
@@ -305,6 +305,7 @@ struct fnic {
        unsigned int copy_wq_base;
        struct work_struct link_work;
        struct work_struct frame_work;
+       struct work_struct flush_work;
        struct sk_buff_head frame_queue;
        struct sk_buff_head tx_queue;
 
@@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-void fnic_flush_tx(struct fnic *);
+void fnic_flush_tx(struct work_struct *work);
 void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
 void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
 void fnic_update_mac(struct fc_lport *, u8 *new);
index 5e312a55cc7da0c73811b0fc26aed17d8c6a034c..a08293b2ad9f59031d5220aba3480a84461a0e8a 100644 (file)
@@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
 
 /**
  * fnic_flush_tx() - send queued frames.
- * @fnic: fnic device
+ * @work: pointer to work element
  *
  * Send frames that were waiting to go out in FC or Ethernet mode.
  * Whenever changing modes we purge queued frames, so these frames should
@@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
  *
  * Called without fnic_lock held.
  */
-void fnic_flush_tx(struct fnic *fnic)
+void fnic_flush_tx(struct work_struct *work)
 {
+       struct fnic *fnic = container_of(work, struct fnic, flush_work);
        struct sk_buff *skb;
        struct fc_frame *fp;
 
index 5ed1d897311a88c0d1194bff7b36e9677a45166f..29eead383eb9a478bb71643eaac1a4e302418f0f 100644 (file)
@@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                spin_lock_init(&fnic->vlans_lock);
                INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
                INIT_WORK(&fnic->event_work, fnic_handle_event);
+               INIT_WORK(&fnic->flush_work, fnic_flush_tx);
                skb_queue_head_init(&fnic->fip_frame_queue);
                INIT_LIST_HEAD(&fnic->evlist);
                INIT_LIST_HEAD(&fnic->vlans);
index 8d7fc5284293b5283523b049ba38387857ebb09e..fc4cee91b175c14d0950337ac8928b659cbc8cef 100644 (file)
@@ -680,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
 
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-       fnic_flush_tx(fnic);
+       queue_work(fnic_event_queue, &fnic->flush_work);
 
  reset_cmpl_handler_end:
        fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
@@ -736,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-               fnic_flush_tx(fnic);
+               queue_work(fnic_event_queue, &fnic->flush_work);
                queue_work(fnic_event_queue, &fnic->frame_work);
        } else {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);