RDMA/irdma: Use HW specific minimum WQ size
Author: Sindhu Devale <sindhu.devale@intel.com>
Tue, 25 Jul 2023 15:55:25 +0000 (10:55 -0500)
Committer: Leon Romanovsky <leon@kernel.org>
Sun, 30 Jul 2023 12:43:00 +0000 (15:43 +0300)
HW GEN1 and GEN2 have different min WQ sizes but they are
currently set to the same value.

Use a gen specific attribute min_hw_wq_size and extend ABI to
pass it to user-space.

Signed-off-by: Sindhu Devale <sindhu.devale@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20230725155525.1081-3-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/irdma/i40iw_hw.c
drivers/infiniband/hw/irdma/i40iw_hw.h
drivers/infiniband/hw/irdma/icrdma_hw.c
drivers/infiniband/hw/irdma/icrdma_hw.h
drivers/infiniband/hw/irdma/irdma.h
drivers/infiniband/hw/irdma/uk.c
drivers/infiniband/hw/irdma/user.h
drivers/infiniband/hw/irdma/verbs.c
include/uapi/rdma/irdma-abi.h

index 37a40fb4d0d7e87deeb724f522c78323c82586a9..638d127fb3e0ee4f6dda8a42e41ceda214ec774e 100644 (file)
@@ -254,5 +254,6 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
        dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
        dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
        dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+       dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE;
        dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
 }
index 1c438b3593ea8cd82b8b94257de5fabe5c9f03d9..10afc165f5ea97dc2c835a50060853f06403a000 100644 (file)
@@ -140,11 +140,11 @@ enum i40iw_device_caps_const {
        I40IW_MAX_CQ_SIZE                       = 1048575,
        I40IW_MAX_OUTBOUND_MSG_SIZE             = 2147483647,
        I40IW_MAX_INBOUND_MSG_SIZE              = 2147483647,
+       I40IW_MIN_WQ_SIZE                       = 4 /* WQEs */,
 };
 
 #define I40IW_QP_WQE_MIN_SIZE   32
 #define I40IW_QP_WQE_MAX_SIZE   128
-#define I40IW_QP_SW_MIN_WQSIZE  4
 #define I40IW_MAX_RQ_WQE_SHIFT  2
 #define I40IW_MAX_QUANTA_PER_WR 2
 
index 298d14905993b9e07d841ba2224e5e07d368cac9..10ccf4bc3f2d28ec673fa09521a220fa5dbec1a2 100644 (file)
@@ -195,6 +195,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
        dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
        dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
 
+       dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
        dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
        dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
                                                IRDMA_FEATURE_CQ_RESIZE;
index b65c463abf0b39d6a724d23d7ece53b8667ea76e..54035a08cc93b6e2130f851bdc253932f01d0d05 100644 (file)
@@ -64,6 +64,7 @@ enum icrdma_device_caps_const {
 
        ICRDMA_MAX_IRD_SIZE                     = 127,
        ICRDMA_MAX_ORD_SIZE                     = 255,
+       ICRDMA_MIN_WQ_SIZE                      = 8 /* WQEs */,
 
 };
 
index 173e2dc2fc3556408724d580fd8d0078af39ee33..3237fa64bc8f950f5a439d71366786e6d5dae0b0 100644 (file)
@@ -119,6 +119,7 @@ struct irdma_uk_attrs {
        u32 min_hw_cq_size;
        u32 max_hw_cq_size;
        u16 max_hw_sq_chunk;
+       u16 min_hw_wq_size;
        u8 hw_rev;
 };
 
index fd337caa2e3b7aa64f359a510d7ca58eda5a6472..ac650a784245b296d2f57f226ba931bbf07bcda0 100644 (file)
@@ -1349,10 +1349,12 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
                      u32 *sqdepth)
 {
+       u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
        *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
 
-       if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
-               *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+       if (*sqdepth < min_size)
+               *sqdepth = min_size;
        else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
                return -EINVAL;
 
@@ -1369,10 +1371,12 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
                      u32 *rqdepth)
 {
+       u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
        *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
 
-       if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
-               *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+       if (*rqdepth < min_size)
+               *rqdepth = min_size;
        else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
                return -EINVAL;
 
index 1e0e1a71dbadad5cc77fcfc8e1e413675d5e16e3..dd145ec72a915888c7d00e9b5d6d82c4097f984f 100644 (file)
@@ -85,6 +85,7 @@ enum irdma_device_caps_const {
        IRDMA_Q2_BUF_SIZE =                     256,
        IRDMA_QP_CTX_SIZE =                     256,
        IRDMA_MAX_PDS =                         262144,
+       IRDMA_MIN_WQ_SIZE_GEN2 =                8,
 };
 
 enum irdma_addressing_type {
index 0187cff7b9c6faffae06941eed2aff26f3fad9af..b9420b0c42b33881b3e20474457a774d1509f354 100644 (file)
@@ -330,6 +330,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
                uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
                uresp.hw_rev = uk_attrs->hw_rev;
                uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+               uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+               uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
                if (ib_copy_to_udata(udata, &uresp,
                                     min(sizeof(uresp), udata->outlen))) {
                        rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
index 3a0cde4dcf331f23d1749f6d52823558c1c5ce35..bb18f15489e37ba8bc38bc4d252c33e7d2b1a2a3 100644 (file)
@@ -24,6 +24,7 @@ enum irdma_memreg_type {
 
 enum {
        IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+       IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
 };
 
 struct irdma_alloc_ucontext_req {
@@ -52,6 +53,8 @@ struct irdma_alloc_ucontext_resp {
        __u8 hw_rev;
        __u8 rsvd2;
        __aligned_u64 comp_mask;
+       __u16 min_hw_wq_size;
+       __u8 rsvd3[6];
 };
 
 struct irdma_alloc_pd_resp {