scsi: ufs: core: Prepare ufshcd_send_command() for MCQ
authorAsutosh Das <quic_asutoshd@quicinc.com>
Fri, 13 Jan 2023 20:48:47 +0000 (12:48 -0800)
committerMartin K. Petersen <martin.petersen@oracle.com>
Sat, 14 Jan 2023 02:03:37 +0000 (21:03 -0500)
Add support to send commands using multiple submission queues in MCQ mode.
Modify the functions that use ufshcd_send_command().

Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/ufs/core/ufs-mcq.c
drivers/ufs/core/ufshcd-priv.h
drivers/ufs/core/ufshcd.c
include/ufs/ufshcd.h

index 8bf222fb4b0661a660fa0582645956a932ad7965..68158259b223c15eb8ed03751266759c1efdc963 100644 (file)
@@ -309,6 +309,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
        for (i = 0; i < hba->nr_hw_queues; i++) {
                hwq = &hba->uhq[i];
                hwq->max_entries = hba->nutrs;
+               spin_lock_init(&hwq->sq_lock);
        }
 
        /* The very first HW queue serves device commands */
index 4cd9b7b63868ff12ae94aadd9e4350c801e5d5dc..013111fca0f75b251a7832d23e3645bf704d2cf4 100644 (file)
@@ -335,4 +335,14 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
        return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
 }
 
+static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
+{
+       u32 mask = q->max_entries - 1;
+       u32 val;
+
+       q->sq_tail_slot = (q->sq_tail_slot + 1) & mask;
+       val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
+       writel(val, q->mcq_sq_tail);
+}
+
 #endif /* _UFSHCD_PRIV_H_ */
index a492c88ccecb5b700d5dfb3eb5b1860cef714972..655903b1d8f7af1f6ed6e8adeaba67769e5d7a7b 100644 (file)
@@ -2185,9 +2185,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
  * @task_tag: Task tag of the command
+ * @hwq: pointer to hardware queue instance
  */
 static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
+                        struct ufs_hw_queue *hwq)
 {
        struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
        unsigned long flags;
@@ -2201,12 +2203,24 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
        if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
                ufshcd_start_monitor(hba, lrbp);
 
-       spin_lock_irqsave(&hba->outstanding_lock, flags);
-       if (hba->vops && hba->vops->setup_xfer_req)
-               hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
-       __set_bit(task_tag, &hba->outstanding_reqs);
-       ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-       spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+       if (is_mcq_enabled(hba)) {
+               int utrd_size = sizeof(struct utp_transfer_req_desc);
+
+               spin_lock(&hwq->sq_lock);
+               memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
+                      lrbp->utr_descriptor_ptr, utrd_size);
+               ufshcd_inc_sq_tail(hwq);
+               spin_unlock(&hwq->sq_lock);
+       } else {
+               spin_lock_irqsave(&hba->outstanding_lock, flags);
+               if (hba->vops && hba->vops->setup_xfer_req)
+                       hba->vops->setup_xfer_req(hba, lrbp->task_tag,
+                                                 !!lrbp->cmd);
+               __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
+               ufshcd_writel(hba, 1 << lrbp->task_tag,
+                             REG_UTP_TRANSFER_REQ_DOOR_BELL);
+               spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+       }
 }
 
 /**
@@ -2836,6 +2850,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        int tag = scsi_cmd_to_rq(cmd)->tag;
        struct ufshcd_lrb *lrbp;
        int err = 0;
+       struct ufs_hw_queue *hwq = NULL;
 
        WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
 
@@ -2920,7 +2935,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto out;
        }
 
-       ufshcd_send_command(hba, tag);
+       ufshcd_send_command(hba, tag, hwq);
 
 out:
        rcu_read_unlock();
@@ -3121,10 +3136,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                goto out;
 
        hba->dev_cmd.complete = &wait;
+       hba->dev_cmd.cqe = NULL;
 
        ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
 
-       ufshcd_send_command(hba, tag);
+       ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
        ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
                                    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
@@ -6938,7 +6954,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 
        ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
 
-       ufshcd_send_command(hba, tag);
+       ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
        /*
         * ignore the returning value here - ufshcd_check_query_response is
         * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -7104,7 +7120,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
 
        hba->dev_cmd.complete = &wait;
 
-       ufshcd_send_command(hba, tag);
+       ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
 
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
 
index 13a2f17daa8c890e0a5183edf0d29583b6dc8947..019b8cf23b29b01b76bad6bfd3bc322ebd673f0a 100644 (file)
@@ -224,6 +224,7 @@ struct ufs_dev_cmd {
        struct mutex lock;
        struct completion *complete;
        struct ufs_query query;
+       struct cq_entry *cqe;
 };
 
 /**
@@ -1078,6 +1079,8 @@ struct ufs_hba {
  * @cqe_dma_addr: completion queue dma address
  * @max_entries: max number of slots in this hardware queue
  * @id: hardware queue ID
+ * @sq_tail_slot: current slot to which SQ tail pointer is pointing
+ * @sq_lock: serialize submission queue access
  */
 struct ufs_hw_queue {
        void __iomem *mcq_sq_head;
@@ -1091,6 +1094,8 @@ struct ufs_hw_queue {
        dma_addr_t cqe_dma_addr;
        u32 max_entries;
        u32 id;
+       u32 sq_tail_slot;
+       spinlock_t sq_lock;
 };
 
 static inline bool is_mcq_enabled(struct ufs_hba *hba)