RDMA/hns: Fix UAF for cq async event
Author: Chengchang Tang <tangchengchang@huawei.com>
Fri, 12 Apr 2024 09:16:11 +0000 (17:16 +0800)
Committer: Leon Romanovsky <leon@kernel.org>
Tue, 16 Apr 2024 12:06:47 +0000 (15:06 +0300)
The CQ refcount is not protected by any lock. When a CQ asynchronous
event races with CQ destruction, the CQ may already have been freed by
the time the event handler dereferences it, causing a use-after-free
(UAF).

Use xa_lock() to protect the CQ refcount: take the reference while
still holding the lock under which the destroy path erases the CQ from
the xarray. Since the event handler runs in interrupt context, the
process-context store and erase paths switch to the xa_store_irq() and
xa_erase_irq() variants so they cannot deadlock against it.
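
The essence of the fix is that the lookup and the refcount increment
must be one atomic step with respect to the erase in the destroy path.
Below is a minimal userspace sketch of that pattern, with hypothetical
names and a pthread mutex plus C11 atomics standing in for xa_lock()
and refcount_t; it is an illustration, not the driver's code:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct cq {
		atomic_int refcount;	/* stands in for refcount_t */
		/* ... per-CQ state ... */
	};

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct cq *cq_table[64];	/* stands in for the cq_table xarray */

	/* Event path: take the reference while still holding the lock the
	 * destroy path holds to unpublish the CQ. A load done outside the
	 * lock could return a pointer freed before the increment lands. */
	static struct cq *cq_event_get(unsigned int cqn)
	{
		struct cq *cq;

		pthread_mutex_lock(&table_lock);
		cq = cq_table[cqn & 63];	/* mirrors cqn & (num_cqs - 1) */
		if (cq)
			atomic_fetch_add(&cq->refcount, 1);
		pthread_mutex_unlock(&table_lock);

		return cq;	/* NULL: CQ already destroyed, bogus event */
	}

	/* Drop a reference; in this sketch the last one frees the object. */
	static void cq_put(struct cq *cq)
	{
		if (atomic_fetch_sub(&cq->refcount, 1) == 1)
			free(cq);
	}

	/* Destroy path: unpublish under the same lock, then drop the
	 * table's reference. An event path that won the race still holds
	 * its own reference and keeps the object alive. */
	static void cq_destroy(unsigned int cqn)
	{
		struct cq *cq;

		pthread_mutex_lock(&table_lock);
		cq = cq_table[cqn & 63];
		cq_table[cqn & 63] = NULL;
		pthread_mutex_unlock(&table_lock);

		if (cq)
			cq_put(cq);
	}

Unlike this sketch, the driver does not free on the last put; teardown
waits for the references to drain, as sketched after the diff below.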

Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240412091616.370789-6-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/hns/hns_roce_cq.c

index 7250d0643b5c5dd9baaa6e95388dc447d36c7ad0..68e22f368d43a3968069db5a5289b029690cbb41 100644
@@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
                return ret;
        }
 
-       ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+       ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
        if (ret) {
                ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
                goto err_put;
@@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
        return 0;
 
 err_xa:
-       xa_erase(&cq_table->array, hr_cq->cqn);
+       xa_erase_irq(&cq_table->array, hr_cq->cqn);
 err_put:
        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 
@@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
                dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
                        hr_cq->cqn);
 
-       xa_erase(&cq_table->array, hr_cq->cqn);
+       xa_erase_irq(&cq_table->array, hr_cq->cqn);
 
        /* Waiting interrupt process procedure carried out */
        synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
        struct ib_event event;
        struct ib_cq *ibcq;
 
-       hr_cq = xa_load(&hr_dev->cq_table.array,
-                       cqn & (hr_dev->caps.num_cqs - 1));
-       if (!hr_cq) {
-               dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
-               return;
-       }
-
        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
                return;
        }
 
-       refcount_inc(&hr_cq->refcount);
+       xa_lock(&hr_dev->cq_table.array);
+       hr_cq = xa_load(&hr_dev->cq_table.array,
+                       cqn & (hr_dev->caps.num_cqs - 1));
+       if (hr_cq)
+               refcount_inc(&hr_cq->refcount);
+       xa_unlock(&hr_dev->cq_table.array);
+       if (!hr_cq) {
+               dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+               return;
+       }
 
        ibcq = &hr_cq->ib_cq;
        if (ibcq->event_handler) {
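
One detail the sketch above glosses over: in the free_cqc() hunk,
destruction does not free on the last put but blocks until every
outstanding event reference has been dropped (the real driver uses a
struct completion after the synchronize_irq() shown above). A hedged
condition-variable equivalent, again with hypothetical names:

	#include <pthread.h>

	struct cq_sync {
		int refcount;		/* table ref + event refs, under lock */
		pthread_mutex_t lock;
		pthread_cond_t drained;	/* stands in for the kernel completion */
	};

	/* Event path: drop a reference, waking the destroyer on the last. */
	static void cq_put_sync(struct cq_sync *cq)
	{
		pthread_mutex_lock(&cq->lock);
		if (--cq->refcount == 0)
			pthread_cond_signal(&cq->drained);
		pthread_mutex_unlock(&cq->lock);
	}

	/* Destroy path: after erasing the CQ from the table, drop the
	 * table's own reference and sleep until the event paths have
	 * drained; only then is it safe to tear the object down. */
	static void cq_wait_drained(struct cq_sync *cq)
	{
		pthread_mutex_lock(&cq->lock);
		cq->refcount--;
		while (cq->refcount > 0)
			pthread_cond_wait(&cq->drained, &cq->lock);
		pthread_mutex_unlock(&cq->lock);
	}

With both pieces in place, an event that loses the race to
cq_destroy() sees NULL and bails out, and one that wins it holds a
reference that keeps the CQ alive until its handler returns.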