Currently all the object types in the rxe driver are allocated in
rdma-core except for MRs. By moving the kzalloc() call outside of
the pool code, the rxe_alloc() subroutine can be eliminated and the
code that checks for MR as a special case can be removed.
This patch moves the kzalloc() and kfree_rcu() calls into the MR
registration and destruction verbs. It removes that code from
rxe_pool.c, including the rxe_alloc() subroutine, which is no longer
used.
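
After this change each MR verb owns its object's lifetime. A sketch
of the resulting pattern (per-verb setup between allocation and
finalize elided):

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* reserves an index in pool->xa; the stored pointer stays
	 * NULL so the object cannot be looked up yet
	 */
	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err)
		goto err_free;
	...
	rxe_finalize(mr);	/* publish the object for lookups */

and on destruction:

	rxe_cleanup(mr);	/* drop the index, wait out references */
	kfree_rcu(mr);		/* free after an RCU grace period */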
Link: https://lore.kernel.org/r/20230213225551.12437-1-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Reviewed-by: Devesh Sharma <devesh.s.sharma@oracle.com>
Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
return -EINVAL;
rxe_cleanup(mr);
-
+ kfree_rcu(mr);
return 0;
}
WARN_ON(!xa_empty(&pool->xa));
}
-void *rxe_alloc(struct rxe_pool *pool)
-{
- struct rxe_pool_elem *elem;
- void *obj;
- int err;
-
- if (WARN_ON(!(pool->type == RXE_TYPE_MR)))
- return NULL;
-
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
-
- obj = kzalloc(pool->elem_size, GFP_KERNEL);
- if (!obj)
- goto err_cnt;
-
- elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
-
- elem->pool = pool;
- elem->obj = obj;
- kref_init(&elem->ref_cnt);
- init_completion(&elem->complete);
-
- /* allocate index in array but leave pointer as NULL so it
- * can't be looked up until rxe_finalize() is called
- */
- err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
- &pool->next, GFP_KERNEL);
- if (err < 0)
- goto err_free;
-
- return obj;
-
-err_free:
- kfree(obj);
-err_cnt:
- atomic_dec(&pool->num_elem);
- return NULL;
-}
-
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
bool sleepable)
{
int err;
gfp_t gfp_flags;
- if (WARN_ON(pool->type == RXE_TYPE_MR))
- return -EINVAL;
-
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto err_cnt;
if (pool->cleanup)
pool->cleanup(elem);
- if (pool->type == RXE_TYPE_MR)
- kfree_rcu(elem->obj);
-
atomic_dec(&pool->num_elem);
return err;
/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);
-/* allocate an object from pool */
-void *rxe_alloc(struct rxe_pool *pool);
-
/* connect already allocated object to pool */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
bool sleepable);
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mr *mr;
+ int err;
- mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
rxe_get(pd);
mr->ibmr.pd = ibpd;
rxe_mr_init_dma(access, mr);
rxe_finalize(mr);
-
return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+err_out:
+ return ERR_PTR(err);
}
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mr *mr;
- mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
rxe_get(pd);
mr->ibmr.pd = ibpd;
err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err)
- goto err1;
+ goto err_cleanup;
rxe_finalize(mr);
-
return &mr->ibmr;
-err1:
+err_cleanup:
rxe_cleanup(mr);
+err_free:
+ kfree(mr);
+err_out:
return ERR_PTR(err);
}
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
- mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
rxe_get(pd);
mr->ibmr.pd = ibpd;
err = rxe_mr_init_fast(max_num_sg, mr);
if (err)
- goto err1;
+ goto err_cleanup;
rxe_finalize(mr);
-
return &mr->ibmr;
-err1:
+err_cleanup:
rxe_cleanup(mr);
+err_free:
+ kfree(mr);
+err_out:
return ERR_PTR(err);
}