RDMA/hns: Package for hns_roce_rereg_user_mr function
author Lijun Ou <oulijun@huawei.com>
Mon, 8 Jul 2019 13:41:23 +0000 (21:41 +0800)
committer Jason Gunthorpe <jgg@mellanox.com>
Thu, 25 Jul 2019 15:23:11 +0000 (12:23 -0300)
Move the IB_MR_REREG_TRANS handling of the hns_roce_rereg_user_mr()
function into an independent function, rereg_mr_trans(), in order to
improve readability.

Link: https://lore.kernel.org/r/1562593285-8037-8-git-send-email-oulijun@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
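
The refactoring follows a common kernel pattern: hoist one branch's
resource setup, together with its matching error unwinding, into a
helper that releases whatever it acquired before returning, so the
caller keeps a single exit label (free_cmd_mbox) rather than
flag-dependent goto targets. A minimal, self-contained sketch of that
shape, with hypothetical names rather than the driver's API:

/* Helper owns a local unwind label, mirroring release_umem in
 * rereg_mr_trans(); on failure the caller has nothing to undo.
 * Illustrative only; not hns_roce code.
 */
#include <stdlib.h>

struct res { void *buf; };

static int setup_trans(struct res *r)
{
	void *tmp;

	r->buf = malloc(64);
	if (!r->buf)
		return -1;

	tmp = malloc(64);		/* second acquisition may fail */
	if (!tmp)
		goto release_buf;	/* local unwind, like release_umem */

	free(tmp);
	return 0;

release_buf:
	free(r->buf);
	r->buf = NULL;
	return -1;
}

int main(void)
{
	struct res r = { NULL };

	/* Caller side: a failed helper needs no extra cleanup here,
	 * just as a rereg_mr_trans() failure jumps straight to
	 * free_cmd_mbox in hns_roce_rereg_user_mr().
	 */
	if (setup_trans(&r))
		return 1;

	free(r.buf);
	return 0;
}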
drivers/infiniband/hw/hns/hns_roce_mr.c

index c4b758cf7dad7396969608e7b9256a73b2833b27..0cfa94605f7731139446e7425c5f0609cb3a1b79 100644
@@ -1206,6 +1206,80 @@ err_free:
        return ERR_PTR(ret);
 }
 
+static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
+                         u64 start, u64 length,
+                         u64 virt_addr, int mr_access_flags,
+                         struct hns_roce_cmd_mailbox *mailbox,
+                         u32 pdn, struct ib_udata *udata)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+       struct hns_roce_mr *mr = to_hr_mr(ibmr);
+       struct device *dev = hr_dev->dev;
+       int npages;
+       int ret;
+
+       if (mr->size != ~0ULL) {
+               npages = ib_umem_page_count(mr->umem);
+
+               if (hr_dev->caps.pbl_hop_num)
+                       hns_roce_mhop_free(hr_dev, mr);
+               else
+                       dma_free_coherent(dev, npages * 8,
+                                         mr->pbl_buf, mr->pbl_dma_addr);
+       }
+       ib_umem_release(mr->umem);
+
+       mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+       if (IS_ERR(mr->umem)) {
+               ret = PTR_ERR(mr->umem);
+               mr->umem = NULL;
+               return ret;
+       }
+       npages = ib_umem_page_count(mr->umem);
+
+       if (hr_dev->caps.pbl_hop_num) {
+               ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+               if (ret)
+                       goto release_umem;
+       } else {
+               mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+                                                &(mr->pbl_dma_addr),
+                                                GFP_KERNEL);
+               if (!mr->pbl_buf) {
+                       ret = -ENOMEM;
+                       goto release_umem;
+               }
+       }
+
+       ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+                                          mr_access_flags, virt_addr,
+                                          length, mailbox->buf);
+       if (ret)
+               goto release_umem;
+
+       ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+       if (ret) {
+               if (mr->size != ~0ULL) {
+                       npages = ib_umem_page_count(mr->umem);
+
+                       if (hr_dev->caps.pbl_hop_num)
+                               hns_roce_mhop_free(hr_dev, mr);
+                       else
+                               dma_free_coherent(dev, npages * 8,
+                                                 mr->pbl_buf,
+                                                 mr->pbl_dma_addr);
+               }
+
+               goto release_umem;
+       }
+
+       return 0;
+
+release_umem:
+       ib_umem_release(mr->umem);
+       return ret;
+}
+
 int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
                           u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
                           struct ib_udata *udata)
@@ -1216,7 +1290,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
        struct device *dev = hr_dev->dev;
        unsigned long mtpt_idx;
        u32 pdn = 0;
-       int npages;
        int ret;
 
        if (!mr->enabled)
@@ -1243,73 +1316,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
                pdn = to_hr_pd(pd)->pdn;
 
        if (flags & IB_MR_REREG_TRANS) {
-               if (mr->size != ~0ULL) {
-                       npages = ib_umem_page_count(mr->umem);
-
-                       if (hr_dev->caps.pbl_hop_num)
-                               hns_roce_mhop_free(hr_dev, mr);
-                       else
-                               dma_free_coherent(dev, npages * 8, mr->pbl_buf,
-                                                 mr->pbl_dma_addr);
-               }
-               ib_umem_release(mr->umem);
-
-               mr->umem =
-                       ib_umem_get(udata, start, length, mr_access_flags, 0);
-               if (IS_ERR(mr->umem)) {
-                       ret = PTR_ERR(mr->umem);
-                       mr->umem = NULL;
+               ret = rereg_mr_trans(ibmr, flags,
+                                    start, length,
+                                    virt_addr, mr_access_flags,
+                                    mailbox, pdn, udata);
+               if (ret)
                        goto free_cmd_mbox;
-               }
-               npages = ib_umem_page_count(mr->umem);
-
-               if (hr_dev->caps.pbl_hop_num) {
-                       ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
-                       if (ret)
-                               goto release_umem;
-               } else {
-                       mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-                                                        &(mr->pbl_dma_addr),
-                                                        GFP_KERNEL);
-                       if (!mr->pbl_buf) {
-                               ret = -ENOMEM;
-                               goto release_umem;
-                       }
-               }
-       }
-
-       ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
-                                          mr_access_flags, virt_addr,
-                                          length, mailbox->buf);
-       if (ret) {
-               if (flags & IB_MR_REREG_TRANS)
-                       goto release_umem;
-               else
+       } else {
+               ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+                                                  mr_access_flags, virt_addr,
+                                                  length, mailbox->buf);
+               if (ret)
                        goto free_cmd_mbox;
        }
 
-       if (flags & IB_MR_REREG_TRANS) {
-               ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
-               if (ret) {
-                       if (mr->size != ~0ULL) {
-                               npages = ib_umem_page_count(mr->umem);
-
-                               if (hr_dev->caps.pbl_hop_num)
-                                       hns_roce_mhop_free(hr_dev, mr);
-                               else
-                                       dma_free_coherent(dev, npages * 8,
-                                                         mr->pbl_buf,
-                                                         mr->pbl_dma_addr);
-                       }
-
-                       goto release_umem;
-               }
-       }
-
        ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
        if (ret) {
                dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
-               goto release_umem;
+               ib_umem_release(mr->umem);
+               goto free_cmd_mbox;
        }
 
        mr->enabled = 1;
@@ -1320,9 +1345,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
 
        return 0;
 
-release_umem:
-       ib_umem_release(mr->umem);
-
 free_cmd_mbox:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
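
A detail the new helper has to get right is error propagation from
ib_umem_get(): the returned pointer encodes the real errno, which
IS_ERR() detects and PTR_ERR() extracts, and that value, not a blanket
-ENOMEM, is what should travel back to the caller. A self-contained
userspace sketch of the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom, with
definitions simplified from the kernel's and a hypothetical
fake_umem_get() standing in for ib_umem_get():

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095	/* same bound the kernel uses */

static void *ERR_PTR(long error)
{
	return (void *)error;	/* encode -errno in a pointer */
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;	/* decode it back */
}

static int IS_ERR(const void *ptr)
{
	/* error pointers occupy the top MAX_ERRNO addresses */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical stand-in for ib_umem_get(): fails with a specific errno. */
static void *fake_umem_get(int fail)
{
	return fail ? ERR_PTR(-EFAULT) : (void *)0x1000;
}

int main(void)
{
	void *umem = fake_umem_get(1);

	if (IS_ERR(umem)) {
		/* propagate the encoded cause (-EFAULT here) */
		printf("umem_get failed: %ld\n", PTR_ERR(umem));
		return 1;
	}
	return 0;
}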