drm/msm: move msm_gpummu.c to adreno/a2xx_gpummu.c
author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Mon, 1 Apr 2024 02:42:34 +0000 (05:42 +0300)
committer: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Mon, 22 Apr 2024 13:22:49 +0000 (16:22 +0300)
The msm_gpummu.c implementation is used only on A2xx and it is tied to
the A2xx registers. Rename the source file accordingly.

Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Patchwork: https://patchwork.freedesktop.org/patch/585846/
Link: https://lore.kernel.org/r/20240401-fd-xml-shipped-v5-4-4bdb277a85a1@linaro.org
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a2xx_gpu.h
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_gpummu.c [deleted file]
drivers/gpu/drm/msm/msm_mmu.h

index b21ae2880c7159fc1b6e39ff003f07db4496095e..26ed4f4431494cdfd47093aa225a928481b32ab0 100644 (file)
@@ -8,6 +8,7 @@ msm-y := \
        adreno/adreno_device.o \
        adreno/adreno_gpu.o \
        adreno/a2xx_gpu.o \
+       adreno/a2xx_gpummu.o \
        adreno/a3xx_gpu.o \
        adreno/a4xx_gpu.o \
        adreno/a5xx_gpu.o \
@@ -113,7 +114,6 @@ msm-y += \
        msm_ringbuffer.o \
        msm_submitqueue.o \
        msm_gpu_tracepoints.o \
-       msm_gpummu.o
 
 msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
        dp/dp_debug.o
index 0d8133f3174beb1ece9cfb76f77379b03eb46dcf..0dc255ddf5ceba87090f64d5cb9f078b61104063 100644 (file)
@@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
        uint32_t *ptr, len;
        int i, ret;
 
-       msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+       a2xx_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
 
        DBG("%s", gpu->name);
 
@@ -469,7 +469,7 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
 static struct msm_gem_address_space *
 a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
 {
-       struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+       struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu);
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
index 161a075f94af202226af914d098da910db1d4184..53702f19990f39b1d3d9a30e2e02792f5c23968d 100644 (file)
@@ -19,4 +19,8 @@ struct a2xx_gpu {
 };
 #define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
 
+struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+               dma_addr_t *tran_error);
+
 #endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
new file mode 100644 (file)
index 0000000..3964155
--- /dev/null
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/dma-mapping.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#include "adreno_gpu.h"
+#include "a2xx_gpu.h"
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpummu {
+       struct msm_mmu base;
+       struct msm_gpu *gpu;
+       dma_addr_t pt_base;
+       uint32_t *table;
+};
+#define to_a2xx_gpummu(x) container_of(x, struct a2xx_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static void a2xx_gpummu_detach(struct msm_mmu *mmu)
+{
+}
+
+static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+               struct sg_table *sgt, size_t len, int prot)
+{
+       struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
+       unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+       struct sg_dma_page_iter dma_iter;
+       unsigned prot_bits = 0;
+
+       if (prot & IOMMU_WRITE)
+               prot_bits |= 1;
+       if (prot & IOMMU_READ)
+               prot_bits |= 2;
+
+       for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+               dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+               int i;
+
+               for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+                       gpummu->table[idx++] = (addr + i) | prot_bits;
+       }
+
+       /* we can improve by deferring flush for multiple map() */
+       gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+       return 0;
+}
+
+static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
+{
+       struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
+       unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+       unsigned i;
+
+       for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+                gpummu->table[idx] = 0;
+
+       gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+       return 0;
+}
+
+static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
+{
+}
+
+static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
+{
+       struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
+
+       dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+               DMA_ATTR_FORCE_CONTIGUOUS);
+
+       kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+               .detach = a2xx_gpummu_detach,
+               .map = a2xx_gpummu_map,
+               .unmap = a2xx_gpummu_unmap,
+               .destroy = a2xx_gpummu_destroy,
+               .resume_translation = a2xx_gpummu_resume_translation,
+};
+
+struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+       struct a2xx_gpummu *gpummu;
+
+       gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+       if (!gpummu)
+               return ERR_PTR(-ENOMEM);
+
+       gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+               GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+       if (!gpummu->table) {
+               kfree(gpummu);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gpummu->gpu = gpu;
+       msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
+
+       return &gpummu->base;
+}
+
+void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+               dma_addr_t *tran_error)
+{
+       dma_addr_t base = to_a2xx_gpummu(mmu)->pt_base;
+
+       *pt_base = base;
+       *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
deleted file mode 100644 (file)
index f7d1945..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
-
-#include <linux/dma-mapping.h>
-
-#include "msm_drv.h"
-#include "msm_mmu.h"
-#include "adreno/adreno_gpu.h"
-#include "adreno/a2xx.xml.h"
-
-struct msm_gpummu {
-       struct msm_mmu base;
-       struct msm_gpu *gpu;
-       dma_addr_t pt_base;
-       uint32_t *table;
-};
-#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
-
-#define GPUMMU_VA_START SZ_16M
-#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
-#define GPUMMU_PAGE_SIZE SZ_4K
-#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-
-static void msm_gpummu_detach(struct msm_mmu *mmu)
-{
-}
-
-static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
-               struct sg_table *sgt, size_t len, int prot)
-{
-       struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
-       unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
-       struct sg_dma_page_iter dma_iter;
-       unsigned prot_bits = 0;
-
-       if (prot & IOMMU_WRITE)
-               prot_bits |= 1;
-       if (prot & IOMMU_READ)
-               prot_bits |= 2;
-
-       for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
-               dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
-               int i;
-
-               for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
-                       gpummu->table[idx++] = (addr + i) | prot_bits;
-       }
-
-       /* we can improve by deferring flush for multiple map() */
-       gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
-               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
-               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
-       return 0;
-}
-
-static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
-{
-       struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
-       unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
-       unsigned i;
-
-       for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
-                gpummu->table[idx] = 0;
-
-       gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
-               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
-               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
-       return 0;
-}
-
-static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
-{
-}
-
-static void msm_gpummu_destroy(struct msm_mmu *mmu)
-{
-       struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
-
-       dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
-               DMA_ATTR_FORCE_CONTIGUOUS);
-
-       kfree(gpummu);
-}
-
-static const struct msm_mmu_funcs funcs = {
-               .detach = msm_gpummu_detach,
-               .map = msm_gpummu_map,
-               .unmap = msm_gpummu_unmap,
-               .destroy = msm_gpummu_destroy,
-               .resume_translation = msm_gpummu_resume_translation,
-};
-
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
-{
-       struct msm_gpummu *gpummu;
-
-       gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
-       if (!gpummu)
-               return ERR_PTR(-ENOMEM);
-
-       gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
-               GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
-       if (!gpummu->table) {
-               kfree(gpummu);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       gpummu->gpu = gpu;
-       msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
-
-       return &gpummu->base;
-}
-
-void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
-               dma_addr_t *tran_error)
-{
-       dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
-
-       *pt_base = base;
-       *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
-}
index eb72d3645c1d71776bd47f09ebfc330933d443e3..88af4f490881f2a6789ae2d03e1c02d10046331a 100644 (file)
@@ -42,7 +42,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 
 struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
 struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
 static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
                int (*handler)(void *arg, unsigned long iova, int flags, void *data))
@@ -53,10 +52,6 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
 
 struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
 
-void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
-               dma_addr_t *tran_error);
-
-
 int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
                int *asid);
 struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);