In preparation for adding MCQ error handler support, update the MCQ code
to use ufshcd_mcq_poll_cqe_lock() in interrupt context instead of
ufshcd_mcq_poll_cqe_nolock(). This keeps the MCQ interrupt and error
handler contexts synchronized, since both need to access the MCQ
hardware.
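
For context, the locked variant is essentially a spinlock wrapper around
the lockless poll. A minimal sketch, assuming the per-queue cq_lock
spinlock in struct ufs_hw_queue serializes completion queue processing
(an illustration of the idea, not necessarily the exact upstream body):

    unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                           struct ufs_hw_queue *hwq)
    {
            unsigned long completed_reqs;

            /* Serialize CQ processing against the error handler path */
            spin_lock(&hwq->cq_lock);
            completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
            spin_unlock(&hwq->cq_lock);

            return completed_reqs;
    }

With the interrupt path taking cq_lock as well, the error handler can
drain the same completion queues without racing the IRQ handler, at the
cost of a short critical section per interrupt.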
Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
Link: https://lore.kernel.org/r/6ae727ad2a4040469b8f0632b55e0577d80da11b.1685396241.git.quic_nguyenb@quicinc.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Tested-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
---
        ufshcd_compl_one_cqe(hba, tag, cqe);
 }
 
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-                                        struct ufs_hw_queue *hwq)
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+                                               struct ufs_hw_queue *hwq)
 {
        unsigned long completed_reqs = 0;
 
 
        return completed_reqs;
 }
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
 
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                       struct ufs_hw_queue *hwq)
 
        return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
 
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
 
 void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-                                        struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
                                           struct request *req);
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 
                        ufshcd_mcq_write_cqis(hba, events, i);
 
                if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
-                       ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+                       ufshcd_mcq_poll_cqe_lock(hba, hwq);
        }
 
        return IRQ_HANDLED;
 
        struct ufs_hw_queue *hwq = &hba->uhq[id];
 
        ufshcd_mcq_write_cqis(hba, 0x1, id);
-       ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+       ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
        return IRQ_HANDLED;
 }
 
 void ufshcd_hba_stop(struct ufs_hba *hba);
 void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-                                         struct ufs_hw_queue *hwq);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+                                       struct ufs_hw_queue *hwq);
 void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);