RDMA/mlx4: Use ib_umem_num_dma_blocks()
author		Jason Gunthorpe <jgg@nvidia.com>
		Fri, 4 Sep 2020 22:41:56 +0000 (19:41 -0300)
committer	Jason Gunthorpe <jgg@nvidia.com>
		Fri, 11 Sep 2020 13:24:54 +0000 (10:24 -0300)
For the call sites tied to mlx4_ib_umem_calc_optimal_mtt_size(), compute
the value with ib_umem_num_dma_blocks() inside the function itself; the
count the callers passed in was only an odd static default.
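
Concretely, the callers were open-coding a PAGE_SIZE-based seed for the
out-parameter; a minimal sketch of the pattern this patch removes (the
same shape appears in the cq.c, mr.c and qp.c hunks below):

	/* before: caller computes a default MTT count for the function */
	n = ib_umem_page_count(umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(umem, 0, &n);

	/* after: the function seeds its own default */
	shift = mlx4_ib_umem_calc_optimal_mtt_size(umem, 0, &n);

where mlx4_ib_umem_calc_optimal_mtt_size() now starts with
*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE).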

All other places are just using it with PAGE_SIZE, switch to
ib_umem_num_dma_blocks().

As these are the last call sites, remove ib_umem_page_count().
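
For reference, ib_umem_page_count() summed sg_dma_len() over the
scatterlist (see the umem.c hunk below), while the replacement derives
the count from the mapped range. A sketch of ib_umem_num_dma_blocks() as
it stands in include/rdma/ib_umem.h at this point in the series (quoted
from memory, not part of this patch):

	/* number of pgsz-aligned blocks covering [iova, iova + length) */
	static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
						    unsigned long pgsz)
	{
		return (size_t)(ALIGN(umem->iova + umem->length, pgsz) -
				ALIGN_DOWN(umem->iova, pgsz)) / pgsz;
	}

With pgsz == PAGE_SIZE this should return the same count that
ib_umem_page_count() did for fully DMA-mapped umems, which is why the
remaining callers can switch directly.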

Link: https://lore.kernel.org/r/15-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/srq.c
include/rdma/ib_umem.h

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index b57dbb14de83789b7b6cf3f52f411442804d3c74..c1ab6a4f2bc386c63014e96f548e0ade093058f9 100644
@@ -350,18 +350,6 @@ void ib_umem_release(struct ib_umem *umem)
 }
 EXPORT_SYMBOL(ib_umem_release);
 
-int ib_umem_page_count(struct ib_umem *umem)
-{
-       int i, n = 0;
-       struct scatterlist *sg;
-
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-               n += sg_dma_len(sg) >> PAGE_SHIFT;
-
-       return n;
-}
-EXPORT_SYMBOL(ib_umem_page_count);
-
 /*
  * Copy from the given ib_umem's pages to the given buffer.
  *
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index ee50dd823a8e8dc6baa5e96c03f769504c7f1228..e9b5a4d57fb1b550c1383020e3c38982404379c0 100644
@@ -149,7 +149,6 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);
 
-       n = ib_umem_page_count(*umem);
        shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
        err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
 
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 1d5ef0de12c9509ad9192443f41bca5ee81b2830..bfb779b5eeb3d263216f830af925f085b06cd46e 100644
@@ -271,6 +271,8 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
        u64 total_len = 0;
        int i;
 
+       *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                /*
                 * Initialization - save the first chunk start as the
@@ -421,7 +423,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                goto err_free;
        }
 
-       n = ib_umem_page_count(mr->umem);
        shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
 
        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -511,7 +512,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
-               n = ib_umem_page_count(mmr->umem);
+               n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
                shift = PAGE_SHIFT;
 
                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b7a0c3f9771310fb2e5f6c63c5d06729884a2bdb..1c94a2c4463a950e4d17d4cb4f95cdf1e2aec8bd 100644
@@ -922,7 +922,6 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
                goto err;
        }
 
-       n = ib_umem_page_count(qp->umem);
        shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
        err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
@@ -1117,7 +1116,6 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
                        goto err;
                }
 
-               n = ib_umem_page_count(qp->umem);
                shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
                err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 2651b68a1c0417b0b9a8732999524737487273d4..bf618529e734d2b091fcc30b65dc8af4241221b4 100644
@@ -115,8 +115,9 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
                if (IS_ERR(srq->umem))
                        return PTR_ERR(srq->umem);
 
-               err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-                                   PAGE_SHIFT, &srq->mtt);
+               err = mlx4_mtt_init(
+                       dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
+                       PAGE_SHIFT, &srq->mtt);
                if (err)
                        goto err_buf;
 
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index cf001d4e0a4fa60483fd32eaa805178942313747..70597508c7656b6dbd30626000e4521f0c04e6e1 100644
@@ -74,7 +74,6 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                            size_t size, int access);
 void ib_umem_release(struct ib_umem *umem);
-int ib_umem_page_count(struct ib_umem *umem);
 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                      size_t length);
 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
@@ -92,7 +91,6 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device,
        return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
-static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                                    size_t length) {
        return -EINVAL;