RDMA/hns: Fix return value in hns_roce_map_mr_sg
Author: Zhengchao Shao <shaozhengchao@huawei.com>
AuthorDate: Thu, 11 Apr 2024 03:38:51 +0000 (11:38 +0800)
Committer: Leon Romanovsky <leon@kernel.org>
CommitDate: Tue, 16 Apr 2024 11:59:08 +0000 (14:59 +0300)
As described in the ib_map_mr_sg() function comment, it must return the
number of scatterlist elements that were mapped to the memory region.
However, hns_roce_map_mr_sg() was returning the number of pages required
to map the DMA area instead. Fix it to return the sg element count.

Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process")
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
Link: https://lore.kernel.org/r/20240411033851.2884771-1-shaozhengchao@huawei.com
Reviewed-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/hns/hns_roce_mr.c

index 9e05b57a2d67d4bdf7d6c96c29f0cbcac4deb021..80c050d7d0ea644e14a79b66c76bfed6fb6591f6 100644 (file)
@@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-       int ret = 0;
+       int ret, sg_num = 0;
 
        mr->npages = 0;
        mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
                                 sizeof(dma_addr_t), GFP_KERNEL);
        if (!mr->page_list)
-               return ret;
+               return sg_num;
 
-       ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
-       if (ret < 1) {
+       sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+       if (sg_num < 1) {
                ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-                         mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+                         mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
                goto err_page_list;
        }
 
@@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
        ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
        if (ret) {
                ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
-               ret = 0;
+               sg_num = 0;
        } else {
                mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
-               ret = mr->npages;
        }
 
 err_page_list:
        kvfree(mr->page_list);
        mr->page_list = NULL;
 
-       return ret;
+       return sg_num;
 }
 
 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,