#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
- struct bnxt_re_cq *cq;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
nq = cq->qplib_cq.nq;
+ cctx = rdev->chip_ctx;
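+ /* Free the toggle page shared with user space and drop this CQ from the id hash */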
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ free_page((unsigned long)cq->uctx_cq_page);
+ hash_del(&cq->hash_entry);
+ }
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- int rc, entries;
- int cqe = attr->cqe;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq = NULL;
+ int rc = -ENOMEM, entries;
unsigned int nq_alloc_cnt;
+ int cqe = attr->cqe;
u32 active_cqs;
if (attr->flags)
	return -EOPNOTSUPP;
}
cq->rdev = rdev;
+ cctx = rdev->chip_ctx;
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
entries = bnxt_re_init_depth(cqe + 1, uctx);
spin_lock_init(&cq->cq_lock);
if (udata) {
- struct bnxt_re_cq_resp resp;
-
+ struct bnxt_re_cq_resp resp = {};
+
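+ /* On toggle-capable chips, share a zeroed page with user space that carries the CQ toggle bit and track the CQ by its id for later lookup */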
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ /* Allocate a page */
+ cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cq->uctx_cq_page)
+ goto c2fail;
+ hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+ resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+ }
resp.cqid = cq->qplib_cq.id;
resp.tail = cq->qplib_cq.hwq.cons;
resp.phase = cq->qplib_cq.period;
resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
- goto c2fail;
+ goto free_mem;
}
}
return 0;
+free_mem:
+ if (cq->uctx_cq_page) {
+ hash_del(&cq->hash_entry);
+ free_page((unsigned long)cq->uctx_cq_page);
+ }
c2fail:
ib_umem_release(cq->umem);
fail:
}
}
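+/* Find a CQ by its hardware id in the per-device CQ hash table */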
+static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
+{
+ struct bnxt_re_cq *cq = NULL, *tmp_cq;
+
+ hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
+ if (tmp_cq->qplib_cq.id == cq_id) {
+ cq = tmp_cq;
+ break;
+ }
+ }
+ return cq;
+}
+
/* Handler to share a resource's toggle memory with the user application */
static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_ucontext *uctx;
struct ib_ucontext *ib_uctx;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
u64 mem_offset;
u64 addr = 0;
u32 length;
u32 offset;
+ u32 cq_id;
int err;
ib_uctx = ib_uverbs_get_ucontext(attrs);
switch (res_type) {
case BNXT_RE_CQ_TOGGLE_MEM:
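+ /* Look up the CQ from the id passed by user space and expose its toggle page */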
+ err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
+
+ cq = bnxt_re_search_for_cq(rdev, cq_id);
+ if (!cq)
+ return -EINVAL;
+
+ length = PAGE_SIZE;
+ addr = (u64)cq->uctx_cq_page;
+ mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ offset = 0;
+ break;
case BNXT_RE_SRQ_TOGGLE_MEM:
break;
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
#include "roce_hsi.h"
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
+ cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
+ u32 *cq_ptr;
if (cq->ib_cq.comp_handler) {
- /* Lock comp_handler? */
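+ /* Publish the current toggle value to the shared page before raising the completion */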
+ if (cq->uctx_cq_page) {
+ cq_ptr = (u32 *)cq->uctx_cq_page;
+ *cq_ptr = cq->qplib_cq.toggle;
+ }
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
}
*/
bnxt_re_vf_res_config(rdev);
}
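+ /* Initialize the hash table used to find CQs by id for toggle-memory requests */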
+ hash_init(rdev->cq_hash);
return 0;
free_sctx: