dmaengine: imx-sdma: Support allocating memory from internal SRAM (iram)
author Nicolin Chen <b42378@freescale.com>
Fri, 29 Mar 2024 14:34:41 +0000 (10:34 -0400)
committer Vinod Koul <vkoul@kernel.org>
Sun, 7 Apr 2024 11:19:25 +0000 (16:49 +0530)
Allocate memory from SoC internal SRAM to reduce DDR access and keep DDR in a
lower power state (such as self-refresh) longer.

Check iram_pool before sdma_init() so that the ccb/context can be allocated
from iram, because DDR may be in self-refresh in the low power audio case
while SDMA is still running.
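
For reference, a minimal sketch of the allocation pattern the patch applies at
each call site (sdma_request_channel0, sdma_alloc_bd, sdma_init): prefer the
optional iram gen_pool and fall back to a coherent DDR allocation. The helper
names below are hypothetical; the driver open-codes this check inline.

    #include <linux/dma-mapping.h>
    #include <linux/genalloc.h>

    /*
     * Illustrative helpers only (not part of the patch): prefer the optional
     * on-chip SRAM pool so DDR can stay in self-refresh, otherwise fall back
     * to a coherent DDR allocation.
     */
    static void *sdma_pool_alloc(struct device *dev, struct gen_pool *iram_pool,
                                 size_t size, dma_addr_t *phys, gfp_t gfp)
    {
            if (iram_pool)
                    return gen_pool_dma_alloc(iram_pool, size, phys);

            return dma_alloc_coherent(dev, size, phys, gfp);
    }

    static void sdma_pool_free(struct device *dev, struct gen_pool *iram_pool,
                               size_t size, void *cpu, dma_addr_t phys)
    {
            if (iram_pool)
                    gen_pool_free(iram_pool, (unsigned long)cpu, size);
            else
                    dma_free_coherent(dev, size, cpu, phys);
    }

The pool itself is looked up once in probe via of_gen_pool_get(np, "iram", 0),
so platforms without an "iram" phandle keep the existing dma_alloc_coherent()
path unchanged.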

Reviewed-by: Shengjiu Wang <shengjiu.wang@nxp.com>
Signed-off-by: Nicolin Chen <b42378@freescale.com>
Signed-off-by: Joy Zou <joy.zou@nxp.com>
Reviewed-by: Daniel Baluta <daniel.baluta@nxp.com>
Signed-off-by: Frank Li <Frank.Li@nxp.com>
Link: https://lore.kernel.org/r/20240329-sdma_upstream-v4-1-daeb3067dea7@nxp.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/imx-sdma.c

index 9b42f5e96b1e0a2b7d001fa406a7d98cd040a577..4f1a9d1b152d66402a5032137d9ca055c11093f3 100644
@@ -24,6 +24,7 @@
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
@@ -531,6 +532,7 @@ struct sdma_engine {
        /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
        bool                            clk_ratio;
        bool                            fw_loaded;
+       struct gen_pool                 *iram_pool;
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@@ -1358,8 +1360,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 {
        int ret = -EBUSY;
 
-       sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
-                                      GFP_NOWAIT);
+       if (sdma->iram_pool)
+               sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool,
+                                       sizeof(struct sdma_buffer_descriptor),
+                                       &sdma->bd0_phys);
+       else
+               sdma->bd0 = dma_alloc_coherent(sdma->dev,
+                                       sizeof(struct sdma_buffer_descriptor),
+                                       &sdma->bd0_phys, GFP_NOWAIT);
        if (!sdma->bd0) {
                ret = -ENOMEM;
                goto out;
@@ -1379,10 +1387,14 @@ out:
 static int sdma_alloc_bd(struct sdma_desc *desc)
 {
        u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+       struct sdma_engine *sdma = desc->sdmac->sdma;
        int ret = 0;
 
-       desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
-                                     &desc->bd_phys, GFP_NOWAIT);
+       if (sdma->iram_pool)
+               desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys);
+       else
+               desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT);
+
        if (!desc->bd) {
                ret = -ENOMEM;
                goto out;
@@ -1394,9 +1406,12 @@ out:
 static void sdma_free_bd(struct sdma_desc *desc)
 {
        u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+       struct sdma_engine *sdma = desc->sdmac->sdma;
 
-       dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
-                         desc->bd_phys);
+       if (sdma->iram_pool)
+               gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size);
+       else
+               dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys);
 }
 
 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -2068,6 +2083,7 @@ static int sdma_init(struct sdma_engine *sdma)
 {
        int i, ret;
        dma_addr_t ccb_phys;
+       int ccbsize;
 
        ret = clk_enable(sdma->clk_ipg);
        if (ret)
@@ -2083,10 +2099,14 @@ static int sdma_init(struct sdma_engine *sdma)
        /* Be sure SDMA has not started yet */
        writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
 
-       sdma->channel_control = dma_alloc_coherent(sdma->dev,
-                       MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
-                       sizeof(struct sdma_context_data),
-                       &ccb_phys, GFP_KERNEL);
+       ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
+                 + sizeof(struct sdma_context_data));
+
+       if (sdma->iram_pool)
+               sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
+       else
+               sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys,
+                                                          GFP_KERNEL);
 
        if (!sdma->channel_control) {
                ret = -ENOMEM;
@@ -2272,6 +2292,12 @@ static int sdma_probe(struct platform_device *pdev)
                        vchan_init(&sdmac->vc, &sdma->dma_device);
        }
 
+       if (np) {
+               sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
+               if (sdma->iram_pool)
+                       dev_info(&pdev->dev, "alloc bd from iram.\n");
+       }
+
        ret = sdma_init(sdma);
        if (ret)
                goto err_init;