RDMA/mlx5: Merge similar flows of allocating MR from the cache
author Aharon Landau <aharonl@nvidia.com>
Tue, 15 Feb 2022 17:55:31 +0000 (19:55 +0200)
committer Jason Gunthorpe <jgg@nvidia.com>
Wed, 23 Feb 2022 18:59:13 +0000 (14:59 -0400)
When allocating an MR from the cache, the driver calls get_cache_mr() and,
in case of failure, retries with create_cache_mr(). This is exactly the
flow of mlx5_mr_cache_alloc(), so use it instead.

Link: https://lore.kernel.org/r/53c85fcd4de6ec9de0b8e6cbb1bf5d5fe19900c3.1644947594.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
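
The hunks below only show the edges of the merged function, so here is a
sketch of how mlx5_mr_cache_alloc() plausibly reads after this patch. The
miss path (ent->miss++ and the create_cache_mr() fallback) sits in the
unchanged region between the two mr.c hunks, so it is reconstructed from
context here rather than copied from the diff:

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
                                       struct mlx5_cache_ent *ent,
                                       int access_flags)
{
        struct mlx5_ib_mr *mr;

        /* Matches access in alloc_cache_mr() */
        if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
                return ERR_PTR(-EOPNOTSUPP);

        spin_lock_irq(&ent->lock);
        if (list_empty(&ent->head)) {
                /* Cache miss: account for it and fall back to creating
                 * an MR, as the removed get_cache_mr()/create_cache_mr()
                 * pair in alloc_cacheable_mr() used to do.
                 */
                queue_adjust_cache_locked(ent);
                ent->miss++;
                spin_unlock_irq(&ent->lock);
                mr = create_cache_mr(ent);
                if (IS_ERR(mr))
                        return mr;
        } else {
                /* Cache hit: take the first free MR off the entry's list. */
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->available_mrs--;
                queue_adjust_cache_locked(ent);
                spin_unlock_irq(&ent->lock);
                mlx5_clear_mr(mr);
        }
        return mr;
}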

index bacb6900b39f8926d163045399e702dce192bc34..751d02bc755b155cf10640806f6556d74eb9e1dd 100644
@@ -1343,7 +1343,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
-                                      unsigned int entry, int access_flags);
+                                      struct mlx5_cache_ent *ent,
+                                      int access_flags);
 
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status);
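
With this prototype change, callers resolve the cache entry themselves and
pass a struct mlx5_cache_ent pointer rather than an index into
dev->cache.ent[], which is why the index-range WARN_ON disappears from
mr.c below. The translation at a call site (shown here with
MLX5_IMR_MTT_CACHE_ENTRY, matching the odp.c hunk further down) is:

        /* before: pass an entry index */
        mr = mlx5_mr_cache_alloc(dev, MLX5_IMR_MTT_CACHE_ENTRY, access_flags);

        /* after: pass the cache entry itself */
        mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
                                 access_flags);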
index bce3cb6af5243880dc7c230564bdd85ddcdd433d..0c1dc13b4c45dfb4881b910db20c6aaafc6b67fa 100644
@@ -558,23 +558,16 @@ static void delayed_cache_work_func(struct work_struct *work)
        __cache_work_func(ent);
 }
 
-/* Allocate a special entry from the cache */
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
-                                      unsigned int entry, int access_flags)
+                                      struct mlx5_cache_ent *ent,
+                                      int access_flags)
 {
-       struct mlx5_mr_cache *cache = &dev->cache;
-       struct mlx5_cache_ent *ent;
        struct mlx5_ib_mr *mr;
 
-       if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
-                   entry >= ARRAY_SIZE(cache->ent)))
-               return ERR_PTR(-EINVAL);
-
        /* Matches access in alloc_cache_mr() */
        if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
                return ERR_PTR(-EOPNOTSUPP);
 
-       ent = &cache->ent[entry];
        spin_lock_irq(&ent->lock);
        if (list_empty(&ent->head)) {
                queue_adjust_cache_locked(ent);
@@ -592,32 +585,9 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 
                mlx5_clear_mr(mr);
        }
-       mr->access_flags = access_flags;
        return mr;
 }
 
-/* Return a MR already available in the cache */
-static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
-{
-       struct mlx5_ib_mr *mr = NULL;
-       struct mlx5_cache_ent *ent = req_ent;
-
-       spin_lock_irq(&ent->lock);
-       if (!list_empty(&ent->head)) {
-               mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-               list_del(&mr->list);
-               ent->available_mrs--;
-               queue_adjust_cache_locked(ent);
-               spin_unlock_irq(&ent->lock);
-               mlx5_clear_mr(mr);
-               return mr;
-       }
-       queue_adjust_cache_locked(ent);
-       spin_unlock_irq(&ent->lock);
-       req_ent->miss++;
-       return NULL;
-}
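
Nothing that get_cache_mr() did is lost here: the list handling and the
queue_adjust_cache_locked() calls already exist in mlx5_mr_cache_alloc(),
and the req_ent->miss++ accounting presumably survives in that function's
miss path, which falls in the unchanged region between the two hunks above
(an inference from context, not something this diff shows).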
-
 static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        struct mlx5_cache_ent *ent = mr->cache_ent;
@@ -951,16 +921,9 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
                return mr;
        }
 
-       mr = get_cache_mr(ent);
-       if (!mr) {
-               mr = create_cache_mr(ent);
-               /*
-                * The above already tried to do the same stuff as reg_create(),
-                * no reason to try it again.
-                */
-               if (IS_ERR(mr))
-                       return mr;
-       }
+       mr = mlx5_mr_cache_alloc(dev, ent, access_flags);
+       if (IS_ERR(mr))
+               return mr;
 
        mr->ibmr.pd = pd;
        mr->umem = umem;
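
A small behavioral detail: get_cache_mr() signaled a miss by returning
NULL, whereas mlx5_mr_cache_alloc() returns an ERR_PTR() on failure, so
the caller collapses to a single IS_ERR() check. The deleted comment
remains true in spirit: the create_cache_mr() fallback inside the helper
already attempts the same work as reg_create(), so there is still no point
retrying it here.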
index 86842cd580ba85e70fb53233678fef93beac03b6..f834c9590c514583251051ca0944a8e8731188b4 100644
@@ -407,6 +407,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
 static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
                                                unsigned long idx)
 {
+       struct mlx5_ib_dev *dev = mr_to_mdev(imr);
        struct ib_umem_odp *odp;
        struct mlx5_ib_mr *mr;
        struct mlx5_ib_mr *ret;
@@ -418,13 +419,14 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
        if (IS_ERR(odp))
                return ERR_CAST(odp);
 
-       mr = mlx5_mr_cache_alloc(
-               mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
+       mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
+                                imr->access_flags);
        if (IS_ERR(mr)) {
                ib_umem_odp_release(odp);
                return mr;
        }
 
+       mr->access_flags = imr->access_flags;
        mr->ibmr.pd = imr->ibmr.pd;
        mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
        mr->umem = &odp->umem;
@@ -493,12 +495,15 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
        if (IS_ERR(umem_odp))
                return ERR_CAST(umem_odp);
 
-       imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
+       imr = mlx5_mr_cache_alloc(dev,
+                                 &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
+                                 access_flags);
        if (IS_ERR(imr)) {
                ib_umem_odp_release(umem_odp);
                return imr;
        }
 
+       imr->access_flags = access_flags;
        imr->ibmr.pd = &pd->ibpd;
        imr->ibmr.iova = 0;
        imr->umem = &umem_odp->umem;
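
In both ODP call sites the explicit access_flags assignment is new,
compensating for the "mr->access_flags = access_flags;" line removed from
mlx5_mr_cache_alloc() in the mr.c hunk: the helper still takes
access_flags, but now uses it only for the mlx5_ib_can_reconfig_with_umr()
capability check, leaving it to callers to record the flags on the MR.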