scsi: ufs: core: mcq: Use shared tags for MCQ mode
authorAsutosh Das <quic_asutoshd@quicinc.com>
Fri, 13 Jan 2023 20:48:46 +0000 (12:48 -0800)
committerMartin K. Petersen <martin.petersen@oracle.com>
Sat, 14 Jan 2023 02:03:37 +0000 (21:03 -0500)
Enable shared tags for MCQ. For UFS, this should not have a huge
performance impact. However, it simplifies the MCQ implementation and
reuses most of the existing code in the issue and completion paths.
Also add multiple queue mapping to ufshcd_map_queues().

Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/ufs/core/ufs-mcq.c
drivers/ufs/core/ufshcd.c

index 496e2b638c445585d482a6188dbf039eac7ddf61..8bf222fb4b0661a660fa0582645956a932ad7965 100644 (file)
@@ -280,6 +280,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 
 int ufshcd_mcq_init(struct ufs_hba *hba)
 {
+       struct Scsi_Host *host = hba->host;
        struct ufs_hw_queue *hwq;
        int ret, i;
 
@@ -315,5 +316,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
        /* Give dev_cmd_queue the minimal number of entries */
        hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
 
+       host->host_tagset = 1;
        return 0;
 }
index 9d582786dabe2d74ba8ee94cb9388df8c58379f1..a492c88ccecb5b700d5dfb3eb5b1860cef714972 100644 (file)
@@ -2776,24 +2776,28 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
  */
 static void ufshcd_map_queues(struct Scsi_Host *shost)
 {
-       int i;
+       struct ufs_hba *hba = shost_priv(shost);
+       int i, queue_offset = 0;
+
+       if (!is_mcq_supported(hba)) {
+               hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
+               hba->nr_queues[HCTX_TYPE_READ] = 0;
+               hba->nr_queues[HCTX_TYPE_POLL] = 1;
+               hba->nr_hw_queues = 1;
+       }
 
        for (i = 0; i < shost->nr_maps; i++) {
                struct blk_mq_queue_map *map = &shost->tag_set.map[i];
 
-               switch (i) {
-               case HCTX_TYPE_DEFAULT:
-               case HCTX_TYPE_POLL:
-                       map->nr_queues = 1;
-                       break;
-               case HCTX_TYPE_READ:
-                       map->nr_queues = 0;
+               map->nr_queues = hba->nr_queues[i];
+               if (!map->nr_queues)
                        continue;
-               default:
-                       WARN_ON_ONCE(true);
-               }
-               map->queue_offset = 0;
+               map->queue_offset = queue_offset;
+               if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
+                       map->queue_offset = 0;
+
                blk_mq_map_queues(map);
+               queue_offset += map->nr_queues;
        }
 }