RDMA/hns: Use refcount_t instead of atomic_t for QP reference counting
author: Weihang Li <liweihang@huawei.com>
Fri, 28 May 2021 09:37:41 +0000 (17:37 +0800)
committer: Jason Gunthorpe <jgg@nvidia.com>
Tue, 8 Jun 2021 17:58:51 +0000 (14:58 -0300)
The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-11-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_qp.c

index bd190219755144123a5c195d233b705232bbc6fd..7d00d4cdaa7603a6d433e19c39f19b84eebf86a1 100644 (file)
@@ -639,7 +639,7 @@ struct hns_roce_qp {
 
        u32                     xrcdn;
 
-       atomic_t                refcount;
+       refcount_t              refcount;
        struct completion       free;
 
        struct hns_roce_sge     sge;
index 9203cf189dd552fb71bc55e67edf63b14685ed16..3a018a308a602655b4d0863aecd2e68e05c2ca6a 100644 (file)
@@ -65,7 +65,7 @@ static void flush_work_handle(struct work_struct *work)
         * make sure we signal QP destroy leg that flush QP was completed
         * so that it can safely proceed ahead now and destroy QP
         */
-       if (atomic_dec_and_test(&hr_qp->refcount))
+       if (refcount_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
 }
 
@@ -75,7 +75,7 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
        flush_work->hr_dev = hr_dev;
        INIT_WORK(&flush_work->work, flush_work_handle);
-       atomic_inc(&hr_qp->refcount);
+       refcount_inc(&hr_qp->refcount);
        queue_work(hr_dev->irq_workq, &flush_work->work);
 }
 
@@ -87,7 +87,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
        xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
-               atomic_inc(&qp->refcount);
+               refcount_inc(&qp->refcount);
        xa_unlock(&hr_dev->qp_table_xa);
 
        if (!qp) {
@@ -108,7 +108,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 
        qp->event(qp, (enum hns_roce_event)event_type);
 
-       if (atomic_dec_and_test(&qp->refcount))
+       if (refcount_dec_and_test(&qp->refcount))
                complete(&qp->free);
 }
 
@@ -1076,7 +1076,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 
        hr_qp->ibqp.qp_num = hr_qp->qpn;
        hr_qp->event = hns_roce_ib_qp_event;
-       atomic_set(&hr_qp->refcount, 1);
+       refcount_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);
 
        return 0;
@@ -1099,7 +1099,7 @@ err_buf:
 void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                         struct ib_udata *udata)
 {
-       if (atomic_dec_and_test(&hr_qp->refcount))
+       if (refcount_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);