RDMA/hns: Refactor mtr_init_buf_cfg()
Author:     Chengchang Tang <tangchengchang@huawei.com>
AuthorDate: Sat, 13 Jan 2024 08:59:31 +0000 (16:59 +0800)
Commit:     Leon Romanovsky <leon@kernel.org>
CommitDate: Thu, 25 Jan 2024 09:54:38 +0000 (11:54 +0200)
page_shift and page_cnt are only used in mtr_map_bufs(), and these
parameters can be calculated independently there.

Strip the computation of page_shift and page_cnt from mtr_init_buf_cfg()
to reduce its number of parameters. This helps reduce the coupling
between mtr_init_buf_cfg() and mtr_map_bufs().
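
After this change the flow looks roughly as follows (an abbreviated
sketch based on the diff below; error handling and unrelated steps in
hns_roce_mtr_create() are omitted):

	/* mtr_init_buf_cfg() only fills mtr->hem_cfg now and returns
	 * 0 or -EINVAL instead of a page count.
	 */
	ret = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
			       udata ? user_addr & ~PAGE_MASK : 0);
	if (ret)
		return ret;

	/* mtr_map_bufs() derives both values internally:
	 * page_count is the sum of region->count over all configured
	 * regions (cal_mtr_pg_cnt()), and page_shift depends only on
	 * the addressing mode already recorded in hem_cfg.
	 */
	ret = mtr_map_bufs(hr_dev, mtr);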

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240113085935.2838701-3-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 9537a2c00bb6fed78a0f9d28d00fee660371f33d..adc401aea8df28ffb328c44114711bc3cf3f6c4a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -707,14 +707,37 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
        return 0;
 }
 
-static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-                       int page_count, unsigned int page_shift)
+static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
+{
+       struct hns_roce_buf_region *region;
+       int page_cnt = 0;
+       int i;
+
+       for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+               region = &mtr->hem_cfg.region[i];
+               page_cnt += region->count;
+       }
+
+       return page_cnt;
+}
+
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
 {
        struct ib_device *ibdev = &hr_dev->ib_dev;
+       int page_count = cal_mtr_pg_cnt(mtr);
+       unsigned int page_shift;
        dma_addr_t *pages;
        int npage;
        int ret;
 
+       /* When HEM buffer uses 0-level addressing, the page size is
+        * equal to the whole buffer size, and we split the buffer into
+        * small pages which is used to check whether the adjacent
+        * units are in the continuous space and its size is fixed to
+        * 4K based on hns ROCEE's requirement.
+        */
+       page_shift = mtr->hem_cfg.is_direct ? HNS_HW_PAGE_SHIFT :
+                                             mtr->hem_cfg.buf_pg_shift;
        /* alloc a tmp array to store buffer's dma address */
        pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pages)
@@ -894,37 +917,30 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 
 static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
                            struct hns_roce_buf_attr *attr,
-                           struct hns_roce_hem_cfg *cfg,
-                           unsigned int *buf_page_shift, u64 unalinged_size)
+                           struct hns_roce_hem_cfg *cfg, u64 unalinged_size)
 {
+       struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_buf_region *r;
        u64 first_region_padding;
        int page_cnt, region_cnt;
-       unsigned int page_shift;
+       size_t buf_pg_sz;
        size_t buf_size;
 
        /* If mtt is disabled, all pages must be within a continuous range */
        cfg->is_direct = !mtr_has_mtt(attr);
        buf_size = mtr_bufs_size(attr);
        if (cfg->is_direct) {
-               /* When HEM buffer uses 0-level addressing, the page size is
-                * equal to the whole buffer size, and we split the buffer into
-                * small pages which is used to check whether the adjacent
-                * units are in the continuous space and its size is fixed to
-                * 4K based on hns ROCEE's requirement.
-                */
-               page_shift = HNS_HW_PAGE_SHIFT;
-
-               /* The ROCEE requires the page size to be 4K * 2 ^ N. */
+               buf_pg_sz = HNS_HW_PAGE_SIZE;
                cfg->buf_pg_count = 1;
+               /* The ROCEE requires the page size to be 4K * 2 ^ N. */
                cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
                        order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
                first_region_padding = 0;
        } else {
-               page_shift = attr->page_shift;
                cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size,
-                                                1 << page_shift);
-               cfg->buf_pg_shift = page_shift;
+                                                1 << attr->page_shift);
+               cfg->buf_pg_shift = attr->page_shift;
+               buf_pg_sz = 1 << cfg->buf_pg_shift;
                first_region_padding = unalinged_size;
        }
 
@@ -937,7 +953,7 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
                r->offset = page_cnt;
                buf_size = hr_hw_page_align(attr->region[region_cnt].size +
                                            first_region_padding);
-               r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
+               r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
                first_region_padding = 0;
                page_cnt += r->count;
                r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
@@ -945,9 +961,13 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
        }
 
        cfg->region_count = region_cnt;
-       *buf_page_shift = page_shift;
+       if (cfg->region_count < 1 || cfg->buf_pg_shift < HNS_HW_PAGE_SHIFT) {
+               ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
+                         cfg->region_count, cfg->buf_pg_shift);
+               return -EINVAL;
+       }
 
-       return page_cnt;
+       return 0;
 }
 
 static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
@@ -1035,18 +1055,12 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                        unsigned long user_addr)
 {
        struct ib_device *ibdev = &hr_dev->ib_dev;
-       unsigned int buf_page_shift = 0;
-       int buf_page_cnt;
        int ret;
 
-       buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
-                                       &buf_page_shift,
-                                       udata ? user_addr & ~PAGE_MASK : 0);
-       if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
-               ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
-                         buf_page_cnt, buf_page_shift);
-               return -EINVAL;
-       }
+       ret = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
+                              udata ? user_addr & ~PAGE_MASK : 0);
+       if (ret)
+               return ret;
 
        ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
        if (ret) {
@@ -1070,7 +1084,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
        }
 
        /* Write buffer's dma address to MTT */
-       ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
+       ret = mtr_map_bufs(hr_dev, mtr);
        if (ret)
                ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
        else