dmaengine: ti: edma: Support for interleaved mem to mem transfer
author Peter Ujfalusi <peter.ujfalusi@ti.com>
Mon, 10 Feb 2020 09:44:55 +0000 (11:44 +0200)
committer Vinod Koul <vkoul@kernel.org>
Tue, 25 Feb 2020 06:09:27 +0000 (11:39 +0530)
Add basic support for interleaved mem to mem transfers via EDMA: a
single-frame template (frame_size == 1) is mapped onto one PaRAM set,
with ACNT taken from sgl[0].size, BCNT from numf and the BIDX values
derived from the chunk size plus the inter-chunk gaps.
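
A minimal sketch of a dmaengine consumer exercising this path follows;
the channel lookup, the src_dma/dst_dma handles and the chunk geometry
are illustrative assumptions, not part of this patch:

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERLEAVE, mask);
	/* any channel advertising DMA_INTERLEAVE will do */
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* one chunk per frame, as required by this driver */
	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src_dma;	/* hypothetical dma_addr_t handles */
	xt->dst_start = dst_dma;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* honour sgl[0].icg on the source */
	xt->dst_sgl = false;		/* destination stays contiguous */
	xt->numf = 64;			/* 64 chunks... */
	xt->frame_size = 1;
	xt->sgl[0].size = 512;		/* ...of 512 bytes each */
	xt->sgl[0].icg = 512;		/* skip 512 bytes between source chunks */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);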

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Link: https://lore.kernel.org/r/20200210094455.3615-1-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/ti/edma.c

index 03a7f647f7b2c8fdd1a9c42752ece5e81e057bdf..2b1fdd438e7f2bcba2da5c399f904a3b04cfa1c8 100644
@@ -1275,6 +1275,81 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }
 
+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+                         struct dma_interleaved_template *xt,
+                         unsigned long tx_flags)
+{
+       struct device *dev = chan->device->dev;
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct edmacc_param *param;
+       struct edma_desc *edesc;
+       size_t src_icg, dst_icg;
+       int src_bidx, dst_bidx;
+
+       /* Slave mode is not supported */
+       if (is_slave_direction(xt->dir))
+               return NULL;
+
+       if (xt->frame_size != 1 || xt->numf == 0)
+               return NULL;
+
+       if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
+               return NULL;
+
+       src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+       if (src_icg) {
+               src_bidx = src_icg + xt->sgl[0].size;
+       } else if (xt->src_inc) {
+               src_bidx = xt->sgl[0].size;
+       } else {
+               dev_err(dev, "%s: SRC constant addressing is not supported\n",
+                       __func__);
+               return NULL;
+       }
+
+       dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+       if (dst_icg) {
+               dst_bidx = dst_icg + xt->sgl[0].size;
+       } else if (xt->dst_inc) {
+               dst_bidx = xt->sgl[0].size;
+       } else {
+               dev_err(dev, "%s: DST constant addressing is not supported\n",
+                       __func__);
+               return NULL;
+       }
+
+       if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
+               return NULL;
+
+       edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+       if (!edesc)
+               return NULL;
+
+       edesc->direction = DMA_MEM_TO_MEM;
+       edesc->echan = echan;
+       edesc->pset_nr = 1;
+
+       param = &edesc->pset[0].param;
+
+       param->src = xt->src_start;
+       param->dst = xt->dst_start;
+       param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+       param->ccnt = 1;
+       param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+       param->src_dst_cidx = 0;
+
+       param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+       param->opt |= ITCCHEN;
+       /* Enable transfer complete interrupt if requested */
+       if (tx_flags & DMA_PREP_INTERRUPT)
+               param->opt |= TCINTEN;
+       else
+               edesc->polled = true;
+
+       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
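
To make the PaRAM programming in edma_prep_dma_interleaved() concrete,
assume a hypothetical template with numf = 8 chunks of 64 bytes, a
192 byte source ICG and a contiguous destination; the code above then
computes:

	src_bidx = 192 + 64;			/* src advances 256 bytes per chunk */
	dst_bidx = 64;				/* dst advances 64 bytes per chunk */
	param->a_b_cnt = 8 << 16 | 64;		/* BCNT = 8 chunks, ACNT = 64 bytes */
	param->ccnt = 1;			/* single frame: one C-loop iteration */
	param->src_dst_bidx = (64 << 16) | (192 + 64);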
@@ -1917,7 +1992,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
                         "Legacy memcpy is enabled, things might not work\n");
 
                dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+               dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
                s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+               s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
                s_ddev->directions = BIT(DMA_MEM_TO_MEM);
        }
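
A consumer can check that a given channel actually advertises the new
capability before building a template, for example:

	if (!dma_has_cap(DMA_INTERLEAVE, chan->device->cap_mask))
		return -ENXIO;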
 
@@ -1953,8 +2030,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 
                dma_cap_zero(m_ddev->cap_mask);
                dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+               dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
 
                m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+               m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
                m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
                m_ddev->device_free_chan_resources = edma_free_chan_resources;
                m_ddev->device_issue_pending = edma_issue_pending;
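
Since TCINTEN is only set when DMA_PREP_INTERRUPT is requested, a
descriptor prepared without that flag takes the edesc->polled path and
must be completed by polling; a sketch, assuming the tx and chan from
the earlier example but with tx_flags of 0:

	dma_cookie_t cookie;
	enum dma_status status;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	do {
		status = dmaengine_tx_status(chan, cookie, NULL);
	} while (status == DMA_IN_PROGRESS);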