RDMA/hns: Add mapped page count checking for MTR
author Xi Wang <wangxi11@huawei.com>
Fri, 5 Feb 2021 09:39:25 +0000 (17:39 +0800)
committer Jason Gunthorpe <jgg@nvidia.com>
Tue, 9 Feb 2021 00:15:10 +0000 (20:15 -0400)
Add a check on the mapped page count to avoid using an invalid page size
when creating an MTR.

Fixes: 38389eaa4db1 ("RDMA/hns: Add mtr support for mixed multihop addressing")
Link: https://lore.kernel.org/r/1612517974-31867-4-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_mr.c
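
Before the diffs, a standalone sketch of the bug class this patch closes: the
old mtr_map_region() reported only success or failure, so the caller could not
tell whether every page had actually been placed in the MTT, and a short write
went unnoticed. The model below is plain userspace C with invented names and
sizes, not the kernel code; it shows the new contract, where the mapper returns
how many entries it wrote and the caller treats a shortfall as an error.

#include <stdio.h>

/* Standalone model, plain userspace C -- not the kernel code. The mapper
 * reports how many entries it actually wrote instead of returning 0 on
 * success; TABLE_SIZE and the page list are invented for illustration.
 */
#define TABLE_SIZE 4

static int map_region(unsigned long *table, const unsigned long *pages,
                      int count, int max_count)
{
        int npage = 0;

        while (npage < count && npage < max_count && npage < TABLE_SIZE) {
                table[npage] = pages[npage];
                npage++;
        }

        return npage; /* pages actually written, not just 0 on success */
}

int main(void)
{
        unsigned long table[TABLE_SIZE] = { 0 };
        unsigned long pages[] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };
        int page_cnt = 5; /* one more page than the table can hold */
        int mapped_cnt = map_region(table, pages, page_cnt, page_cnt);

        if (mapped_cnt < page_cnt) { /* the check this patch introduces */
                fprintf(stderr, "mapped %d < %d pages\n", mapped_cnt, page_cnt);
                return 1; /* the kernel path returns -ENOBUFS */
        }
        return 0;
}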

index edc9d6b98d95468119682eb2842f3f602ce09887..cfd2e1b60c7f0366877eb4d6f06e053cfac1b782 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1075,9 +1075,8 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
                return NULL;
 
        if (exist_bt) {
-               hem->addr = dma_alloc_coherent(hr_dev->dev,
-                                                  count * BA_BYTE_LEN,
-                                                  &hem->dma_addr, GFP_KERNEL);
+               hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
+                                              &hem->dma_addr, GFP_KERNEL);
                if (!hem->addr) {
                        kfree(hem);
                        return NULL;
@@ -1336,6 +1335,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
        if (ba_num < 1)
                return -ENOMEM;
 
+       if (ba_num > unit)
+               return -ENOBUFS;
+
        INIT_LIST_HEAD(&temp_root);
        offset = r->offset;
        /* indicate to last region */
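
Reading the hunk above, ba_num looks like the total number of base addresses
(BAs) the regions need in the root base-address table and unit like that
table's capacity, so any larger total must fail with -ENOBUFS up front rather
than be silently truncated. A rough standalone model (the region shapes and
per-region accounting below are invented for illustration, not the driver's):

#include <stdio.h>

/* Standalone model: total the BAs each region wants in the root table
 * and refuse up front when the table ("unit" entries) cannot hold them.
 */
struct region {
        int ba_needed; /* BAs this region consumes in the root table */
};

static int calc_root_ba(const struct region *r, int region_cnt)
{
        int total = 0;

        for (int i = 0; i < region_cnt; i++)
                total += r[i].ba_needed;

        return total;
}

int main(void)
{
        struct region regions[] = { { 3 }, { 2 }, { 1 } };
        int unit = 4; /* illustrative root-table capacity */
        int ba_num = calc_root_ba(regions, 3);

        if (ba_num > unit) { /* the new check; kernel returns -ENOBUFS */
                fprintf(stderr, "need %d BAs, root table holds %d\n",
                        ba_num, unit);
                return 1;
        }
        return 0;
}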
index 5a2a557c37b8a543e22f62e098f56fd29a27efe0..79b3c3023fe7abf36d9a53d0b71dc51f492fd276 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -596,30 +596,26 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
 }
 
 static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-                         dma_addr_t *pages, struct hns_roce_buf_region *region)
+                         struct hns_roce_buf_region *region, dma_addr_t *pages,
+                         int max_count)
 {
+       int count, npage;
+       int offset, end;
        __le64 *mtts;
-       int offset;
-       int count;
-       int npage;
        u64 addr;
-       int end;
        int i;
 
-       /* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
-       if (!region->hopnum)
-               return 0;
-
        offset = region->offset;
        end = offset + region->count;
        npage = 0;
-       while (offset < end) {
+       while (offset < end && npage < max_count) {
+               count = 0;
                mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
                                                  offset, &count, NULL);
                if (!mtts)
                        return -ENOBUFS;
 
-               for (i = 0; i < count; i++) {
+               for (i = 0; i < count && npage < max_count; i++) {
                        if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                                addr = to_hr_hw_page_addr(pages[npage]);
                        else
@@ -631,7 +627,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                offset += count;
        }
 
-       return 0;
+       return npage;
 }
 
 static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
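
For context on the loop above: judging by how its result is consumed,
hns_roce_hem_list_find_mtt() appears to return a pointer to a run of
consecutive MTT entries starting at offset and to report the run's length
through count; the rewritten loop bounds each fill by both that run length and
the caller's remaining budget (max_count), then reports the pages actually
written. A standalone model of that window-bounded fill, where find_window()
is an invented stand-in for the kernel helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the window-bounded fill; find_window() is an
 * invented stand-in for hns_roce_hem_list_find_mtt(), returning a run
 * of consecutive table entries and its length through *count.
 */
#define TABLE_LEN 8
#define WINDOW 2

static uint64_t table[TABLE_LEN];

static uint64_t *find_window(int offset, int *count)
{
        if (offset >= TABLE_LEN)
                return NULL;
        *count = (TABLE_LEN - offset < WINDOW) ? TABLE_LEN - offset : WINDOW;
        return &table[offset];
}

static int map_region(int offset, int end, const uint64_t *pages,
                      int max_count)
{
        int npage = 0;

        while (offset < end && npage < max_count) {
                int count = 0;
                uint64_t *mtts = find_window(offset, &count);

                if (!mtts)
                        return -1; /* stands in for -ENOBUFS */

                /* bounded by BOTH the window and the remaining budget */
                for (int i = 0; i < count && npage < max_count; i++)
                        mtts[i] = pages[npage++];
                offset += count;
        }

        return npage; /* pages actually written */
}

int main(void)
{
        uint64_t pages[] = { 1, 2, 3, 4, 5 };

        printf("mapped %d pages\n", map_region(0, 5, pages, 5));
        return 0;
}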
@@ -779,8 +775,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 {
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_buf_region *r;
-       unsigned int i;
-       int err;
+       unsigned int i, mapped_cnt;
+       int ret;
 
        /*
         * Only use the first page address as root ba when hopnum is 0, this
@@ -791,26 +787,42 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                return 0;
        }
 
-       for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+       for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+            mapped_cnt < page_cnt; i++) {
                r = &mtr->hem_cfg.region[i];
+               /* if hopnum is 0, no need to map pages in this region */
+               if (!r->hopnum) {
+                       mapped_cnt += r->count;
+                       continue;
+               }
+
                if (r->offset + r->count > page_cnt) {
-                       err = -EINVAL;
+                       ret = -EINVAL;
                        ibdev_err(ibdev,
                                  "failed to check mtr%u end %u + %u, max %u.\n",
                                  i, r->offset, r->count, page_cnt);
-                       return err;
+                       return ret;
                }
 
-               err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
-               if (err) {
+               ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
+                                    page_cnt - mapped_cnt);
+               if (ret < 0) {
                        ibdev_err(ibdev,
                                  "failed to map mtr%u offset %u, ret = %d.\n",
-                                 i, r->offset, err);
-                       return err;
+                                 i, r->offset, ret);
+                       return ret;
                }
+               mapped_cnt += ret;
+               ret = 0;
        }
 
-       return 0;
+       if (mapped_cnt < page_cnt) {
+               ret = -ENOBUFS;
+               ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
+                         mapped_cnt, page_cnt);
+       }
+
+       return ret;
 }
 
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
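
Finally, the reworked caller-side accounting in hns_roce_mtr_map(), modeled
standalone: hop-0 regions are counted as mapped without touching the MTT,
multi-hop regions add whatever the mapper reports, and any shortfall at the
end is rejected. All structures and numbers below are illustrative, not the
driver's:

#include <stdio.h>

/* Standalone model of the caller-side accounting; the regions, counts
 * and the pretend mapper are invented for illustration.
 */
struct region {
        int hopnum; /* 0: pages addressed directly, no MTT writes */
        int count;  /* pages belonging to this region */
};

/* Pretend per-region mapper: writes at most "budget" pages. */
static int map_region(const struct region *r, int budget)
{
        return r->count < budget ? r->count : budget;
}

int main(void)
{
        struct region regions[] = { { 0, 2 }, { 2, 3 } };
        int page_cnt = 5;
        int mapped_cnt = 0;

        for (int i = 0; i < 2 && mapped_cnt < page_cnt; i++) {
                if (!regions[i].hopnum) { /* counted, but no MTT writes */
                        mapped_cnt += regions[i].count;
                        continue;
                }
                mapped_cnt += map_region(&regions[i], page_cnt - mapped_cnt);
        }

        if (mapped_cnt < page_cnt) {
                fprintf(stderr, "mapped %d < %d\n", mapped_cnt, page_cnt);
                return 1; /* kernel returns -ENOBUFS */
        }
        printf("all %d pages mapped\n", page_cnt);
        return 0;
}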