 	spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-               struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+               union nvme_result *res)
 {
-       u16 status = le16_to_cpu(cqe->status) >> 1;
-       u32 result = le32_to_cpu(cqe->result.u32);
+       u32 result = le32_to_cpu(res->u32);
+       bool done = true;
 
-       if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+       switch (le16_to_cpu(status) >> 1) {
+       case NVME_SC_SUCCESS:
+               done = false;
+               /*FALLTHRU*/
+       case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
                schedule_work(&ctrl->async_event_work);
+               break;
+       default:
+               break;
        }
 
-       if (status != NVME_SC_SUCCESS)
+       if (done)
                return;
 
        switch (result & 0xff07) {
 
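In the rewritten body, le16_to_cpu(status) >> 1 drops the phase tag carried in bit 0 of the completion status word before the NVME_SC_* comparison, and result & 0xff07 keeps the AER event type (result Dword 0 bits 2:0) plus the event information (bits 15:8) while masking off the log page identifier. The done flag lets the success case share the re-arm step with the abort case while remaining the only one that reaches the result decode. A standalone userspace sketch of that decode; decode_aer and the sample values are illustrative only, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define NVME_SC_SUCCESS		0x0
#define NVME_SC_ABORT_REQ	0x7

static void decode_aer(uint16_t status, uint32_t result)
{
	uint16_t sc = status >> 1;	/* bit 0 is the phase tag, not status */

	if (sc == NVME_SC_ABORT_REQ) {
		printf("aborted AER: re-arm only, nothing to decode\n");
		return;
	}
	if (sc != NVME_SC_SUCCESS) {
		printf("failed AER (status 0x%x): ignored\n", sc);
		return;
	}
	/* bits 2:0 = event type, bits 15:8 = event information */
	printf("event type %u, info 0x%02x\n",
	       (unsigned)(result & 0x7), (unsigned)((result >> 8) & 0xff));
}

int main(void)
{
	decode_aer(NVME_SC_SUCCESS << 1, 0x0002);	/* notice: ns changed */
	decode_aer(NVME_SC_ABORT_REQ << 1, 0);
	return 0;
}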
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 #define NVME_NR_AERS   1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-               struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+               union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
 
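The header side swaps the CQE pointer for exactly the two fields the core consumes. For reference, the result union the new prototype takes a pointer to has this shape (as added to include/linux/nvme.h in the same series; quoted from memory, so treat as a sketch):

union nvme_result {
	__le16	u16;
	__le32	u32;
	__le64	u64;
};

Both arguments stay in wire byte order: status is passed as __le16 by value and the result by pointer, and the core performs its own le16_to_cpu()/le32_to_cpu() conversions, as the first hunk shows.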
                 */
                if (unlikely(nvmeq->qid == 0 &&
                                cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-                       nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+                       nvme_complete_async_event(&nvmeq->dev->ctrl,
+                                       cqe.status, &cqe.result);
                        continue;
                }
 
 
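The qid == 0 && command_id >= NVME_AQ_BLKMQ_DEPTH test recognizes AER completions because the PCIe driver submits async event commands with ids carved out above the admin queue's blk-mq tag range, so they can never collide with a tagged request. A simplified sketch of that submission side for context (reduced from the driver's async-event path; aer_idx is the per-controller AER slot index):

	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	/* ids at or above NVME_AQ_BLKMQ_DEPTH mark the slot as an AER,
	 * which is what the completion check above keys off */
	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;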
         */
        if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
-               nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+               nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+                               &cqe->result);
        else
                ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
        ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
 
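nvme-rdma still receives a full nvme_completion in the admin response and simply forwards the two fields. The payoff of the narrower signature is for a transport whose wire format never materializes a struct nvme_completion: it can report an AEN without building a fake CQE first. A hypothetical handler showing that (foo_handle_aen and the wire_* parameters are invented for illustration):

static void foo_handle_aen(struct nvme_ctrl *ctrl, u16 wire_status,
		u32 wire_result)
{
	/* status and result arrive as separate wire fields; convert to
	 * the CQE byte order the core expects and pass them directly */
	union nvme_result res = {
		.u32 = cpu_to_le32(wire_result),
	};

	nvme_complete_async_event(ctrl, cpu_to_le16(wire_status), &res);
}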
         */
        if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
                        cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
-               nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+               nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+                               &cqe->result);
        } else {
                struct request *rq = blk_mq_rq_from_pdu(iod);
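All three callers funnel into the same core path, so re-arm behavior is uniform across transports: an AER completed with NVME_SC_ABORT_REQ (as happens when the admin queue is torn down) re-arms the event slot but skips the result decode, and any other failure is dropped without re-arming. An illustrative pair of calls exercising those early-return paths (ctrl stands for any live nvme_ctrl; note the << 1 leaving room for the phase bit the core shifts out):

	union nvme_result res = { .u32 = 0 };

	/* abort: hits the fallthrough re-arm, returns before the decode */
	nvme_complete_async_event(ctrl, cpu_to_le16(NVME_SC_ABORT_REQ << 1),
			&res);

	/* any other error: neither re-armed nor decoded */
	nvme_complete_async_event(ctrl, cpu_to_le16(NVME_SC_INTERNAL << 1),
			&res);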