dmaengine: at_xdmac: Remove a level of indentation in at_xdmac_tasklet()
author: Tudor Ambarus <tudor.ambarus@microchip.com>
Wed, 15 Dec 2021 11:01:14 +0000 (13:01 +0200)
committer: Vinod Koul <vkoul@kernel.org>
Wed, 5 Jan 2022 10:20:04 +0000 (15:50 +0530)
Apart from making the code easier to read, this patch is a prerequisite for
a functional change: tasklets run with interrupts enabled, so we need to
protect atchan->irq_status with spin_lock_irq() otherwise the tasklet can
be interrupted by the IRQ that modifies irq_status. atchan->irq_status
will be protected in a further patch.

Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Link: https://lore.kernel.org/r/20211215110115.191749-12-tudor.ambarus@microchip.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/at_xdmac.c

index abe8c4615e6526854f962e7387c4d3f9dd99e951..ba727751a9f659dffdbf9524cfc0c211b151c8f0 100644 (file)
@@ -1667,53 +1667,51 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
 {
        struct at_xdmac_chan    *atchan = from_tasklet(atchan, t, tasklet);
        struct at_xdmac_desc    *desc;
+       struct dma_async_tx_descriptor *txd;
        u32                     error_mask;
 
        dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
                __func__, atchan->irq_status);
 
-       error_mask = AT_XDMAC_CIS_RBEIS
-                    | AT_XDMAC_CIS_WBEIS
-                    | AT_XDMAC_CIS_ROIS;
-
-       if (at_xdmac_chan_is_cyclic(atchan)) {
-               at_xdmac_handle_cyclic(atchan);
-       } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
-                  || (atchan->irq_status & error_mask)) {
-               struct dma_async_tx_descriptor  *txd;
-
-               if (atchan->irq_status & error_mask)
-                       at_xdmac_handle_error(atchan);
-
-               spin_lock_irq(&atchan->lock);
-               desc = list_first_entry(&atchan->xfers_list,
-                                       struct at_xdmac_desc,
-                                       xfer_node);
-               dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-               if (!desc->active_xfer) {
-                       dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-                       spin_unlock_irq(&atchan->lock);
-                       return;
-               }
+       if (at_xdmac_chan_is_cyclic(atchan))
+               return at_xdmac_handle_cyclic(atchan);
 
-               txd = &desc->tx_dma_desc;
-               dma_cookie_complete(txd);
-               /* Remove the transfer from the transfer list. */
-               list_del(&desc->xfer_node);
-               spin_unlock_irq(&atchan->lock);
+       error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+               AT_XDMAC_CIS_ROIS;
 
-               if (txd->flags & DMA_PREP_INTERRUPT)
-                       dmaengine_desc_get_callback_invoke(txd, NULL);
+       if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+           !(atchan->irq_status & error_mask))
+               return;
 
-               dma_run_dependencies(txd);
+       if (atchan->irq_status & error_mask)
+               at_xdmac_handle_error(atchan);
 
-               spin_lock_irq(&atchan->lock);
-               /* Move the xfer descriptors into the free descriptors list. */
-               list_splice_tail_init(&desc->descs_list,
-                                     &atchan->free_descs_list);
-               at_xdmac_advance_work(atchan);
+       spin_lock_irq(&atchan->lock);
+       desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+                               xfer_node);
+       dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+       if (!desc->active_xfer) {
+               dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
                spin_unlock_irq(&atchan->lock);
+               return;
        }
+
+       txd = &desc->tx_dma_desc;
+       dma_cookie_complete(txd);
+       /* Remove the transfer from the transfer list. */
+       list_del(&desc->xfer_node);
+       spin_unlock_irq(&atchan->lock);
+
+       if (txd->flags & DMA_PREP_INTERRUPT)
+               dmaengine_desc_get_callback_invoke(txd, NULL);
+
+       dma_run_dependencies(txd);
+
+       spin_lock_irq(&atchan->lock);
+       /* Move the xfer descriptors into the free descriptors list. */
+       list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+       at_xdmac_advance_work(atchan);
+       spin_unlock_irq(&atchan->lock);
 }
 
 static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)