dmaengine: idxd: add missing callback function to support DMA_INTERRUPT
author Dave Jiang <dave.jiang@intel.com>
Tue, 26 Apr 2022 22:32:06 +0000 (15:32 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 14 Jun 2022 16:36:28 +0000 (18:36 +0200)
commit 2112b8f4fb5cc35d1c384324763765953186b81f upstream.

When setting the DMA_INTERRUPT capability, the callback function
dma->device_prep_dma_interrupt() must also be provided to support this
capability. Without setting the callback, dma_async_device_register() will
fail the DMA capability check.

Fixes: 4e5a4eb20393 ("dmaengine: idxd: set DMA_INTERRUPT cap bit")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/165101232637.3951447.15765792791591763119.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/dma/idxd/dma.c

index acb5681e25eddfa19e2169fb812a38d07576fc1c..29af898f3c24287ef892891377b7fab987f06c30 100644 (file)
@@ -77,6 +77,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
        hw->completion_addr = compl;
 }
 
+static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+       struct idxd_wq *wq = to_idxd_wq(c);
+       u32 desc_flags;
+       struct idxd_desc *desc;
+
+       if (wq->state != IDXD_WQ_ENABLED)
+               return NULL;
+
+       op_flag_setup(flags, &desc_flags);
+       desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+       if (IS_ERR(desc))
+               return NULL;
+
+       idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+                             0, 0, 0, desc->compl_dma, desc_flags);
+       desc->txd.flags = flags;
+       return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                       dma_addr_t dma_src, size_t len, unsigned long flags)
@@ -186,6 +207,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
        dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
        dma->device_release = idxd_dma_release;
 
+       dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
        if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
                dma_cap_set(DMA_MEMCPY, dma->cap_mask);
                dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;