accel/ivpu: Disable buffer sharing among VPU contexts
author Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Mon, 15 Jan 2024 13:44:32 +0000 (14:44 +0100)
committer Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Mon, 22 Jan 2024 09:28:52 +0000 (10:28 +0100)
This was not supported properly. A buffer imported into another VPU
context was created as a separate buffer object with a duplicated sgt.
Both the exported and the imported buffer could then be DMA-mapped,
causing a double mapping on the same device.
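
To illustrate, a rough, simplified sketch of the old import path
(paraphrased from the generic DRM PRIME/shmem helpers, not verbatim
driver code; "vdev" stands for the importing ivpu device): because the
dma-buf was exported with ivpu's own dma_buf_ops, the DRM core could
not recognize a same-device import and fell through to a full
attach/map, so the importer built a second sg_table on top of the
exporter's already DMA-mapped one:

  /* simplified sketch of the old, problematic self-import */
  attach = dma_buf_attach(dma_buf, vdev->drm.dev);           /* same device */
  sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);   /* second DMA mapping */
  obj = drm_gem_shmem_prime_import_sg_table(&vdev->drm, attach, sgt);
  /* the exporter's sgt from drm_gem_shmem_get_pages_sgt() stays mapped,
   * so the same pages end up DMA-mapped twice on one device */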

Buffers imported from another VPU context now only take an additional
reference on the existing buffer object, leaving a single sgt and fixing
the problem above. Buffers still can't be shared across VPU contexts,
because each context has its own MMU mapping and ivpu_bo supports only a
single MMU mapping per buffer.
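
With the driver-specific export callback removed (see the diff below),
bos are exported through the DRM core's default PRIME helpers, so a
same-device import takes the core's self-import fast path. Roughly,
paraphrasing drm_gem_prime_import_dev() from drm_prime.c rather than
quoting it:

  if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
          obj = dma_buf->priv;
          if (obj->dev == dev) {
                  /* importing a dmabuf exported by the same device only
                   * takes another reference on the existing GEM object */
                  drm_gem_object_get(obj);
                  return obj;
          }
  }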

The complete solution would be to keep a list of mappings per buffer, as
the panfrost and etnaviv drivers do; this can be implemented in the
future if required.
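
For reference, a purely hypothetical sketch of what such a per-context
mapping list could look like, loosely modelled on panfrost's
panfrost_gem_mapping; none of these names exist in the ivpu driver
today:

  struct ivpu_bo_mapping {                  /* hypothetical */
          struct list_head bo_node;         /* entry in ivpu_bo::mappings */
          struct kref refcount;
          struct ivpu_mmu_context *ctx;     /* MMU tables of the owning context */
          struct drm_mm_node mm_node;       /* VPU address range within ctx */
  };

  struct ivpu_bo {
          struct drm_gem_shmem_object base;
          struct list_head mappings;        /* one ivpu_bo_mapping per context */
          struct mutex mappings_lock;
          /* ... */
  };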

Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240115134434.493839-8-jacek.lawrynowicz@linux.intel.com
drivers/accel/ivpu/ivpu_gem.c

index 4de454bfbf917e352e7000c2f69889e1dd4d9ac2..95e731e13941d3ebfb1a5d19b5f79a14c74c160d 100644
@@ -222,6 +222,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
        struct ivpu_bo *bo = to_ivpu_bo(obj);
        struct ivpu_addr_range *range;
 
+       if (bo->ctx) {
+               ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+                         file_priv->ctx.id, bo->ctx->id);
+               return -EALREADY;
+       }
+
        if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
                range = &vdev->hw->ranges.shave;
        else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -252,47 +258,9 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
        drm_gem_shmem_free(&bo->base);
 }
 
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops =  {
-       .cache_sgt_mapping = true,
-       .attach = drm_gem_map_attach,
-       .detach = drm_gem_map_detach,
-       .map_dma_buf = drm_gem_map_dma_buf,
-       .unmap_dma_buf = drm_gem_unmap_dma_buf,
-       .release = drm_gem_dmabuf_release,
-       .mmap = drm_gem_dmabuf_mmap,
-       .vmap = drm_gem_dmabuf_vmap,
-       .vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
-       struct drm_device *dev = obj->dev;
-       struct dma_buf_export_info exp_info = {
-               .exp_name = KBUILD_MODNAME,
-               .owner = dev->driver->fops->owner,
-               .ops = &ivpu_bo_dmabuf_ops,
-               .size = obj->size,
-               .flags = flags,
-               .priv = obj,
-               .resv = obj->resv,
-       };
-       void *sgt;
-
-       /*
-        * Make sure that pages are allocated and dma-mapped before exporting the bo.
-        * DMA-mapping is required if the bo will be imported to the same device.
-        */
-       sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
-       if (IS_ERR(sgt))
-               return sgt;
-
-       return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
 static const struct drm_gem_object_funcs ivpu_gem_funcs = {
        .free = ivpu_bo_free,
        .open = ivpu_bo_open,
-       .export = ivpu_bo_export,
        .print_info = drm_gem_shmem_object_print_info,
        .pin = drm_gem_shmem_object_pin,
        .unpin = drm_gem_shmem_object_unpin,