RDMA/irdma: Add support for dmabuf pin memory regions
authorZhu Yanjun <yanjun.zhu@linux.dev>
Fri, 17 Feb 2023 01:14:25 +0000 (09:14 +0800)
committerJason Gunthorpe <jgg@nvidia.com>
Fri, 17 Feb 2023 20:36:14 +0000 (16:36 -0400)
This is a follow-up to the EFA dmabuf support [1]. The irdma driver
currently does not support on-demand paging (ODP), so it uses
habanalabs as the dmabuf exporter and irdma as the importer to allow
peer-to-peer access through libibverbs.

In this commit, the function ib_umem_dmabuf_get_pinned() is used.
This function was introduced with the EFA dmabuf support [1]; it allows
the driver to get a dmabuf umem which is pinned and does not require a
move_notify callback implementation. The returned umem is pinned and
DMA mapped like standard CPU umems, and is released through
ib_umem_release().

[1]https://lore.kernel.org/lkml/20211007114018.GD2688930@ziepe.ca/t/

Link: https://lore.kernel.org/r/20230217011425.498847-1-yanjun.zhu@intel.com
Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/irdma/verbs.c

index 6982f38596c8e107b039bf31d7d9efef10bd7a21..1b2e3e800c9a663c74ae1201f1aad549bcf8202e 100644 (file)
@@ -2977,6 +2977,47 @@ error:
        return ERR_PTR(err);
 }
 
+static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+                                             u64 len, u64 virt,
+                                             int fd, int access,
+                                             struct ib_udata *udata)
+{ /* Register a user MR backed by a pinned dmabuf (no ODP/move_notify). */
+       struct irdma_device *iwdev = to_iwdev(pd->device);
+       struct ib_umem_dmabuf *umem_dmabuf;
+       struct irdma_mr *iwmr;
+       int err;
+
+       if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) /* enforce HW per-MR size cap */
+               return ERR_PTR(-EINVAL);
+
+       umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access); /* pinned + DMA-mapped */
+       if (IS_ERR(umem_dmabuf)) {
+               err = PTR_ERR(umem_dmabuf);
+               ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+               return ERR_PTR(err);
+       }
+
+       iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
+       if (IS_ERR(iwmr)) {
+               err = PTR_ERR(iwmr);
+               goto err_release; /* drop the pinned umem */
+       }
+
+       err = irdma_reg_user_mr_type_mem(iwmr, access);
+       if (err)
+               goto err_iwmr; /* free iwmr, then drop the umem */
+
+       return &iwmr->ibmr;
+
+err_iwmr:
+       irdma_free_iwmr(iwmr);
+
+err_release:
+       ib_umem_release(&umem_dmabuf->umem); /* also unpins/releases the dmabuf */
+
+       return ERR_PTR(err);
+}
+
 /**
  * irdma_reg_phys_mr - register kernel physical memory
  * @pd: ibpd pointer
@@ -4483,6 +4524,7 @@ static const struct ib_device_ops irdma_dev_ops = {
        .query_port = irdma_query_port,
        .query_qp = irdma_query_qp,
        .reg_user_mr = irdma_reg_user_mr,
+       .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
        .req_notify_cq = irdma_req_notify_cq,
        .resize_cq = irdma_resize_cq,
        INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),