RDMA/hns: Support QP's restrack ops for hns driver
authorWenpeng Liang <liangwenpeng@huawei.com>
Mon, 22 Aug 2022 10:44:52 +0000 (18:44 +0800)
committerLeon Romanovsky <leonro@nvidia.com>
Tue, 23 Aug 2022 08:35:05 +0000 (11:35 +0300)
The QP restrack attributes come from the queue information maintained by
the driver.

For example:

$ rdma res show qp link hns_0 lqpn 41 -jp -dd
[ {
        "ifindex": 4,
        "ifname": "hns_0",
        "port": 1,
        "lqpn": 41,
        "rqpn": 40,
        "type": "RC",
        "state": "RTR",
        "rq-psn": 12474738,
        "sq-psn": 0,
        "path-mig-state": "ARMED",
        "pdn": 9,
        "pid": 1523,
        "comm": "ib_send_bw"
    },
    "drv_sq_wqe_cnt": 128,
    "drv_sq_max_gs": 1,
    "drv_rq_wqe_cnt": 512,
    "drv_rq_max_gs": 2,
    "drv_ext_sge_sge_cnt": 0
}

Link: https://lore.kernel.org/r/20220822104455.2311053-5-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_restrack.c

index c73adc0d35551efb4d7b92fcf7dbcb767b3d183c..7578c0c6313b555002a9fe853b4a8c64b1606d5e 100644 (file)
@@ -1225,6 +1225,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
 int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
 struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
                                size_t length,
index 1b66ed45350eff53fd0b5886fb7c0ac1402f8d75..87442027b80861cfbf038ffd3c98f9dc50ea5512 100644 (file)
@@ -568,6 +568,7 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
 static const struct ib_device_ops hns_roce_dev_restrack_ops = {
        .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
        .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
+       /* Driver-specific QP attributes for "rdma res show qp ... -dd". */
+       .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
 };
 
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
index 3f9c2f9dfdf60bc7bad634a92faf343e284f0f85..e8fef37f810d1d6a80d1df358a6d5889882d153b 100644 (file)
@@ -78,3 +78,37 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
 
        return ret;
 }
+
+/*
+ * hns_roce_fill_res_qp_entry - fill the RDMA_NLDEV_ATTR_DRIVER nest of a
+ * restrack QP dump with the queue geometry the driver maintains for this
+ * QP (surfaced by "rdma res show qp ... -dd" as the drv_* attributes).
+ *
+ * Returns 0 on success, or -EMSGSIZE if @msg ran out of room; on failure
+ * the partially built nest is cancelled so the netlink message stays
+ * well formed.
+ *
+ * NOTE(review): values are emitted with the _hex print type, while the
+ * commit-log example above shows decimal output -- confirm which
+ * rendering is intended before relying on the exact format.
+ */
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+       struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+       struct nlattr *table_attr;
+
+       /* Open the driver-private attribute table. */
+       table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+       if (!table_attr)
+               return -EMSGSIZE;
+
+       /* Send queue: WQE count and max scatter/gather entries per WQE. */
+       if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
+               goto err;
+
+       if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
+               goto err;
+
+       /* Receive queue: WQE count and max scatter/gather entries per WQE. */
+       if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
+               goto err;
+
+       if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
+               goto err;
+
+       /* Extended SGE space shared by the SQ (0 when unused). */
+       if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
+               goto err;
+
+       nla_nest_end(msg, table_attr);
+
+       return 0;
+
+err:
+       /* Drop the half-built nest so @msg remains parseable. */
+       nla_nest_cancel(msg, table_attr);
+
+       return -EMSGSIZE;
+}