	/*
	 * make sure we signal QP destroy leg that flush QP was completed
	 * so that it can safely proceed ahead now and destroy QP
	 */
-	if (atomic_dec_and_test(&hr_qp->refcount))
+	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}
	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
-	atomic_inc(&hr_qp->refcount);
+	refcount_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}
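
For context, the two hunks above follow a pin-before-queue pattern: a reference is taken before the flush work is queued, and the work handler drops it when done, so the QP cannot be freed while the work is still pending. refcount_t is preferred over atomic_t here because it saturates and warns on overflow/underflow instead of silently wrapping. Below is a minimal, hedged sketch of that pattern with illustrative names (demo_obj, demo_queue, demo_work_handle are not part of the driver); it pairs refcount_t with a completion the same way the patch does.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>

struct demo_obj {
	refcount_t refcount;		/* pins the object */
	struct completion free;		/* signalled on final put */
	struct work_struct work;
};

static void demo_work_handle(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, work);

	/* ... perform the deferred flush ... */

	/* drop the reference taken at queue time */
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->free);
}

static void demo_queue(struct demo_obj *obj, struct workqueue_struct *wq)
{
	/* pin the object for the lifetime of the queued work */
	refcount_inc(&obj->refcount);
	queue_work(wq, &obj->work);
}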
	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
-		atomic_inc(&qp->refcount);
+		refcount_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp)
		return;

	qp->event(qp, (enum hns_roce_event)event_type);

-	if (atomic_dec_and_test(&qp->refcount))
+	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
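
The lookup above takes its reference under xa_lock(), so the refcount_inc() cannot race with the QP being removed from the table; the count also cannot already be zero there, because the creator's initial reference is only dropped at destroy time. A hedged sketch of that idiom, reusing the illustrative demo_obj from the previous sketch (demo_lookup_get is not a real driver function):

#include <linux/xarray.h>

static struct demo_obj *demo_lookup_get(struct xarray *table,
					unsigned long id)
{
	struct demo_obj *obj;

	/* hold the xarray lock across load + inc to exclude removal */
	xa_lock(table);
	obj = xa_load(table, id);
	if (obj)
		refcount_inc(&obj->refcount);
	xa_unlock(table);

	return obj;	/* caller must drop the reference when done */
}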
	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
-	atomic_set(&hr_qp->refcount, 1);
+	refcount_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;
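
Initialization pairs refcount_set(.., 1) with a not-yet-completed completion: the "1" is the creator's own reference, which is dropped later in the destroy path. A hedged sketch with the same illustrative demo_obj:

static void demo_init(struct demo_obj *obj)
{
	/* the initial reference belongs to the creator */
	refcount_set(&obj->refcount, 1);
	/* completed only when the last reference is dropped */
	init_completion(&obj->free);
	INIT_WORK(&obj->work, demo_work_handle);
}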
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
-	if (atomic_dec_and_test(&hr_qp->refcount))
+	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);
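
Teardown mirrors the other hunks: the destroy path drops the creator's reference and then blocks until every outstanding user (a queued flush work, an in-flight event handler) has dropped its own. A hedged sketch of that shape (demo_destroy is illustrative, not the driver's API):

#include <linux/slab.h>

static void demo_destroy(struct demo_obj *obj)
{
	/* drop the reference installed by refcount_set(.., 1) */
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->free);

	/* wait for the last reference holder to signal us */
	wait_for_completion(&obj->free);

	/* no one else can reach the object now; safe to free */
	kfree(obj);
}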