RDMA/mana_ib: Use struct mana_ib_queue for CQs
author     Konstantin Taranov <kotaranov@microsoft.com>
           Tue, 26 Mar 2024 20:08:06 +0000 (13:08 -0700)
committer  Leon Romanovsky <leon@kernel.org>
           Tue, 2 Apr 2024 08:30:23 +0000 (11:30 +0300)
Use struct mana_ib_queue and its helpers to allocate and free the CQ buffer, replacing the open-coded umem and DMA-region handling in the CQ code.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://lore.kernel.org/r/1711483688-24358-3-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mana/cq.c
drivers/infiniband/hw/mana/mana_ib.h
drivers/infiniband/hw/mana/qp.c
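
For context, this patch builds on the generic queue abstraction introduced earlier in the same series. The sketch below shows roughly what that abstraction looks like; the field and helper names are inferred from how the diff uses them, and the authoritative definitions live in mana_ib.h and main.c, which are not part of this patch.

	/* Sketch (not part of this patch): the queue abstraction the CQ code
	 * is converted to. Names are inferred from their use in the diff below.
	 */
	struct mana_ib_queue {
		struct ib_umem *umem;	/* pinned user buffer backing the queue */
		u64 gdma_region;	/* DMA region registered with the GDMA layer */
		u64 id;			/* hardware queue id, INVALID_QUEUE_ID until assigned */
	};

	int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
				 struct mana_ib_queue *queue);
	void mana_ib_destroy_queue(struct mana_ib_dev *mdev,
				   struct mana_ib_queue *queue);

With this in place, a CQ only needs to embed one struct mana_ib_queue instead of carrying its own umem, gdma_region and id fields.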

diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d09c19c50d6f7d1d5105dec8a8cc38f7..c9129218f1be170cf91a59d4fe18b0689dc5ef80 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        }
 
        cq->cqe = attr->cqe;
-       cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
-                              IB_ACCESS_LOCAL_WRITE);
-       if (IS_ERR(cq->umem)) {
-               err = PTR_ERR(cq->umem);
-               ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
-                         err);
-               return err;
-       }
-
-       err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
+       err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
        if (err) {
-               ibdev_dbg(ibdev,
-                         "Failed to create dma region for create cq, %d\n",
-                         err);
-               goto err_release_umem;
+               ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+               return err;
        }
 
-       ibdev_dbg(ibdev,
-                 "create_dma_region ret %d gdma_region 0x%llx\n",
-                 err, cq->gdma_region);
-
-       /*
-        * The CQ ID is not known at this time. The ID is generated at create_qp
-        */
-       cq->id = INVALID_QUEUE_ID;
-
        return 0;
-
-err_release_umem:
-       ib_umem_release(cq->umem);
-       return err;
 }
 
 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
        struct ib_device *ibdev = ibcq->device;
        struct mana_ib_dev *mdev;
        struct gdma_context *gc;
-       int err;
 
        mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
        gc = mdev_to_gc(mdev);
 
-       err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
-       if (err) {
-               ibdev_dbg(ibdev,
-                         "Failed to destroy dma region, %d\n", err);
-               return err;
-       }
-
-       if (cq->id != INVALID_QUEUE_ID) {
-               kfree(gc->cq_table[cq->id]);
-               gc->cq_table[cq->id] = NULL;
+       if (cq->queue.id != INVALID_QUEUE_ID) {
+               kfree(gc->cq_table[cq->queue.id]);
+               gc->cq_table[cq->queue.id] = NULL;
        }
 
-       ib_umem_release(cq->umem);
+       mana_ib_destroy_queue(mdev, &cq->queue);
 
        return 0;
 }
@@ -114,7 +82,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
        struct gdma_queue *gdma_cq;
 
        /* Create CQ table entry */
-       WARN_ON(gc->cq_table[cq->id]);
+       WARN_ON(gc->cq_table[cq->queue.id]);
        gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
        if (!gdma_cq)
                return -ENOMEM;
@@ -122,7 +90,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
        gdma_cq->cq.context = cq;
        gdma_cq->type = GDMA_CQ;
        gdma_cq->cq.callback = mana_ib_cq_handler;
-       gdma_cq->id = cq->id;
-       gc->cq_table[cq->id] = gdma_cq;
+       gdma_cq->id = cq->queue.id;
+       gc->cq_table[cq->queue.id] = gdma_cq;
        return 0;
 }
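
The helper that replaces the removed allocation code above plausibly folds the deleted steps into a single routine: pin the user buffer, register a zero-offset DMA region, and leave the queue id as INVALID_QUEUE_ID until create_qp assigns it. A minimal sketch under that assumption (the real body lives in main.c and is not shown in this patch):

	/* Hypothetical reconstruction of mana_ib_create_queue(), assembled
	 * from the code removed from mana_ib_create_cq() above.
	 */
	int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
				 struct mana_ib_queue *queue)
	{
		struct ib_umem *umem;
		int err;

		queue->umem = NULL;
		queue->id = INVALID_QUEUE_ID;
		queue->gdma_region = GDMA_INVALID_DMA_REGION;

		umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			return PTR_ERR(umem);

		err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
		if (err)
			goto free_umem;

		queue->umem = umem;
		return 0;

	free_umem:
		ib_umem_release(umem);
		return err;
	}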
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 859fd3bfc764f24b920134a842e1eeea2531076f..6acb5c281c36842da97ff3e729ca91596c6da524 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -88,10 +88,8 @@ struct mana_ib_mr {
 
 struct mana_ib_cq {
        struct ib_cq ibcq;
-       struct ib_umem *umem;
+       struct mana_ib_queue queue;
        int cqe;
-       u64 gdma_region;
-       u64 id;
        u32 comp_vector;
 };
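
Destruction is the mirror image: the DMA-region teardown and umem release removed from mana_ib_destroy_cq() presumably move into the helper. A minimal sketch under the same assumption:

	/* Hypothetical reconstruction of mana_ib_destroy_queue(), matching
	 * the teardown steps removed from mana_ib_destroy_cq() above.
	 */
	void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
	{
		/* The return value of the DMA-region destroy is intentionally
		 * ignored; the caller cannot do anything about a failure at
		 * this point.
		 */
		mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
		ib_umem_release(queue->umem);
	}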
 
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 6e7627745c9578eada23d5db93e79b38cfd6307b..d7485ee6a6854b13f221e1ab5152f41f57f908b4 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
                wq_spec.gdma_region = wq->gdma_region;
                wq_spec.queue_size = wq->wq_buf_size;
 
-               cq_spec.gdma_region = cq->gdma_region;
+               cq_spec.gdma_region = cq->queue.gdma_region;
                cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
                cq_spec.modr_ctx_id = 0;
                eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
@@ -213,16 +213,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 
                /* The GDMA regions are now owned by the WQ object */
                wq->gdma_region = GDMA_INVALID_DMA_REGION;
-               cq->gdma_region = GDMA_INVALID_DMA_REGION;
+               cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
                wq->id = wq_spec.queue_index;
-               cq->id = cq_spec.queue_index;
+               cq->queue.id = cq_spec.queue_index;
 
                ibdev_dbg(&mdev->ib_dev,
                          "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
-                         ret, wq->rx_object, wq->id, cq->id);
+                         ret, wq->rx_object, wq->id, cq->queue.id);
 
-               resp.entries[i].cqid = cq->id;
+               resp.entries[i].cqid = cq->queue.id;
                resp.entries[i].wqid = wq->id;
 
                mana_ind_table[i] = wq->rx_object;
@@ -232,7 +232,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
                if (ret)
                        goto fail;
 
-               gdma_cq_allocated[i] = gc->cq_table[cq->id];
+               gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
        }
        resp.num_entries = i;
 
@@ -264,7 +264,7 @@ fail:
                wq = container_of(ibwq, struct mana_ib_wq, ibwq);
                cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-               gc->cq_table[cq->id] = NULL;
+               gc->cq_table[cq->queue.id] = NULL;
                kfree(gdma_cq_allocated[i]);
 
                mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
@@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
        wq_spec.gdma_region = qp->sq_gdma_region;
        wq_spec.queue_size = ucmd.sq_buf_size;
 
-       cq_spec.gdma_region = send_cq->gdma_region;
+       cq_spec.gdma_region = send_cq->queue.gdma_region;
        cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
        cq_spec.modr_ctx_id = 0;
        eq_vec = send_cq->comp_vector % gc->max_num_queues;
@@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
        /* The GDMA regions are now owned by the WQ object */
        qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
-       send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+       send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
        qp->sq_id = wq_spec.queue_index;
-       send_cq->id = cq_spec.queue_index;
+       send_cq->queue.id = cq_spec.queue_index;
 
        /* Create CQ table entry */
        err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
        ibdev_dbg(&mdev->ib_dev,
                  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
-                 qp->tx_object, qp->sq_id, send_cq->id);
+                 qp->tx_object, qp->sq_id, send_cq->queue.id);
 
        resp.sqid = qp->sq_id;
-       resp.cqid = send_cq->id;
+       resp.cqid = send_cq->queue.id;
        resp.tx_vp_offset = pd->tx_vp_offset;
 
        err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -422,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 err_release_gdma_cq:
        kfree(gdma_cq);
-       gc->cq_table[send_cq->id] = NULL;
+       gc->cq_table[send_cq->queue.id] = NULL;
 
 err_destroy_wq_obj:
        mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);