drm/nouveau: uvmm: rename 'umgr' to 'base'
Author:    Danilo Krummrich <dakr@redhat.com>
           Wed, 20 Sep 2023 14:42:36 +0000 (16:42 +0200)
Committer: Danilo Krummrich <dakr@redhat.com>
           Mon, 25 Sep 2023 23:58:29 +0000 (01:58 +0200)
Rename struct drm_gpuvm within struct nouveau_uvmm from 'umgr' to 'base'.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230920144343.64830-4-dakr@redhat.com
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_exec.c
drivers/gpu/drm/nouveau/nouveau_uvmm.c
drivers/gpu/drm/nouveau/nouveau_uvmm.h

index 053f703f2f68d73a8bb7b2357735cfa77c2d9a63..e83db051e8515a0b4624cedbac432ef905d611ed 100644 (file)
@@ -231,7 +231,7 @@ nouveau_debugfs_gpuva(struct seq_file *m, void *data)
                        continue;
 
                nouveau_uvmm_lock(uvmm);
-               drm_debugfs_gpuva_info(m, &uvmm->umgr);
+               drm_debugfs_gpuva_info(m, &uvmm->base);
                seq_puts(m, "\n");
                nouveau_debugfs_gpuva_regions(m, uvmm);
                nouveau_uvmm_unlock(uvmm);
index c001952cd6788ed3b8029366533cdf7fec2098ea..b4239af29e5aabb24bba3765a8483fad6cfbd7b0 100644 (file)
@@ -106,8 +106,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
        drm_exec_until_all_locked(exec) {
                struct drm_gpuva *va;
 
-               drm_gpuvm_for_each_va(va, &uvmm->umgr) {
-                       if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+               drm_gpuvm_for_each_va(va, &uvmm->base) {
+                       if (unlikely(va == &uvmm->base.kernel_alloc_node))
                                continue;
 
                        ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
index 4ba198519b381332df5b074d2c18c0185dad5e5f..5cf892c50f43ca4d4e78dc9e8f43636e61f6a2e6 100644 (file)
@@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
        struct nouveau_uvma_region *reg;
        int ret;
 
-       if (!drm_gpuvm_interval_empty(&uvmm->umgr, addr, range))
+       if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
                return -ENOSPC;
 
        ret = nouveau_uvma_region_alloc(&reg);
@@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
 {
        struct nouveau_uvmm *uvmm = reg->uvmm;
 
-       return drm_gpuvm_interval_empty(&uvmm->umgr,
+       return drm_gpuvm_interval_empty(&uvmm->base,
                                        reg->va.addr,
                                        reg->va.range);
 }
@@ -589,7 +589,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
        uvma->region = args->region;
        uvma->kind = args->kind;
 
-       drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+       drm_gpuva_map(&uvmm->base, &uvma->va, op);
 
        /* Keep a reference until this uvma is destroyed. */
        nouveau_uvma_gem_get(uvma);
@@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
                                goto unwind_continue;
                        }
 
-                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
+                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
                                                                op->va.addr,
                                                                op->va.range);
                        if (IS_ERR(op->ops)) {
@@ -1205,7 +1205,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
                        ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
                                                            op->ops);
                        if (ret) {
-                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               drm_gpuva_ops_free(&uvmm->base, op->ops);
                                op->ops = NULL;
                                op->reg = NULL;
                                goto unwind_continue;
@@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
                                }
                        }
 
-                       op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->umgr,
+                       op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
                                                              op->va.addr,
                                                              op->va.range,
                                                              op->gem.obj,
@@ -1256,7 +1256,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
                                                          op->va.range,
                                                          op->flags & 0xff);
                        if (ret) {
-                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               drm_gpuva_ops_free(&uvmm->base, op->ops);
                                op->ops = NULL;
                                goto unwind_continue;
                        }
@@ -1264,7 +1264,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
                        break;
                }
                case OP_UNMAP:
-                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
+                       op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
                                                                op->va.addr,
                                                                op->va.range);
                        if (IS_ERR(op->ops)) {
@@ -1275,7 +1275,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
                        ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
                                                            op->ops);
                        if (ret) {
-                               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                               drm_gpuva_ops_free(&uvmm->base, op->ops);
                                op->ops = NULL;
                                goto unwind_continue;
                        }
@@ -1404,7 +1404,7 @@ unwind:
                        break;
                }
 
-               drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+               drm_gpuva_ops_free(&uvmm->base, op->ops);
                op->ops = NULL;
                op->reg = NULL;
        }
@@ -1509,7 +1509,7 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
                }
 
                if (!IS_ERR_OR_NULL(op->ops))
-                       drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+                       drm_gpuva_ops_free(&uvmm->base, op->ops);
 
                if (obj)
                        drm_gem_object_put(obj);
@@ -1836,7 +1836,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
        uvmm->kernel_managed_addr = kernel_managed_addr;
        uvmm->kernel_managed_size = kernel_managed_size;
 
-       drm_gpuvm_init(&uvmm->umgr, cli->name,
+       drm_gpuvm_init(&uvmm->base, cli->name,
                       NOUVEAU_VA_SPACE_START,
                       NOUVEAU_VA_SPACE_END,
                       kernel_managed_addr, kernel_managed_size,
@@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
        return 0;
 
 out_free_gpuva_mgr:
-       drm_gpuvm_destroy(&uvmm->umgr);
+       drm_gpuvm_destroy(&uvmm->base);
 out_unlock:
        mutex_unlock(&cli->mutex);
        return ret;
@@ -1877,11 +1877,11 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
        wait_event(entity->job.wq, list_empty(&entity->job.list.head));
 
        nouveau_uvmm_lock(uvmm);
-       drm_gpuvm_for_each_va_safe(va, next, &uvmm->umgr) {
+       drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
                struct nouveau_uvma *uvma = uvma_from_va(va);
                struct drm_gem_object *obj = va->gem.obj;
 
-               if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+               if (unlikely(va == &uvmm->base.kernel_alloc_node))
                        continue;
 
                drm_gpuva_remove(va);
@@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
 
        mutex_lock(&cli->mutex);
        nouveau_vmm_fini(&uvmm->vmm);
-       drm_gpuvm_destroy(&uvmm->umgr);
+       drm_gpuvm_destroy(&uvmm->base);
        mutex_unlock(&cli->mutex);
 
        dma_resv_fini(&uvmm->resv);
index e96c9919d1bd2f53c16e9337e871303c5799db5e..a308c59760a54a83ebd42aec8ac3354509edaa61 100644 (file)
@@ -8,8 +8,8 @@
 #include "nouveau_drv.h"
 
 struct nouveau_uvmm {
+       struct drm_gpuvm base;
        struct nouveau_vmm vmm;
-       struct drm_gpuvm umgr;
        struct maple_tree region_mt;
        struct mutex mutex;
        struct dma_resv resv;
@@ -41,10 +41,10 @@ struct nouveau_uvma {
        u8 kind;
 };
 
-#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
+#define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base)
 #define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
 
-#define to_uvmm(x) uvmm_from_mgr((x)->va.vm)
+#define to_uvmm(x) uvmm_from_gpuvm((x)->va.vm)
 
 struct nouveau_uvmm_bind_job {
        struct nouveau_job base;