nvmet: introduce new max queue size configuration entry
authorMax Gurtovoy <mgurtovoy@nvidia.com>
Tue, 23 Jan 2024 14:40:31 +0000 (16:40 +0200)
committerKeith Busch <kbusch@kernel.org>
Sat, 2 Mar 2024 23:18:08 +0000 (15:18 -0800)
Using this port configuration, one can set the maximum queue size to be
used by any controller that is associated with the configured port.

The default value remains 1024, but each transport can set its own value
before enabling the port.

Introduce a lower limit of 16 for the minimum queue depth (the same
limit used by the host fabrics drivers).
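
As a usage sketch, the new attribute appears under the port's configfs
directory as param_max_queue_size (the name follows from the
CONFIGFS_ATTR() in the diff below). The port id "1" and driving it from
C rather than a shell are illustrative assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical port "1"; configfs assumed mounted at the usual path. */
	const char *attr =
		"/sys/kernel/config/nvmet/ports/1/param_max_queue_size";
	const char *val = "128\n";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Must be written while the port is disabled; the store callback
	 * below returns -EACCES for an enabled port. */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}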

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/nvmet.h

index 2482a0db25043c88f2cb3fa3fd0bda3adf8abbbf..77a6e817b31596998e4424aa8205f8cfd9219f1d 100644 (file)
@@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
 
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+               char *page)
+{
+       struct nvmet_port *port = to_nvmet_port(item);
+
+       return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct nvmet_port *port = to_nvmet_port(item);
+       int ret;
+
+       if (nvmet_is_port_enabled(port, __func__))
+               return -EACCES;
+       ret = kstrtoint(page, 0, &port->max_queue_size);
+       if (ret) {
+               pr_err("Invalid value '%s' for max_queue_size\n", page);
+               return -EINVAL;
+       }
+       return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
                char *page)
@@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
        &nvmet_attr_addr_trtype,
        &nvmet_attr_addr_tsas,
        &nvmet_attr_param_inline_data_size,
+       &nvmet_attr_param_max_queue_size,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
        &nvmet_attr_param_pi_enable,
 #endif
@@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
        INIT_LIST_HEAD(&port->subsystems);
        INIT_LIST_HEAD(&port->referrals);
        port->inline_data_size = -1;    /* < 0 == let the transport choose */
+       port->max_queue_size = -1;      /* < 0 == let the transport choose */
 
        port->disc_addr.portid = cpu_to_le16(portid);
        port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
index 5d50f731c326aad8ab4354cc1f6d257bd493e49b..6bbe4df0166ca56949a5f5b14ad90f68305d6f36 100644 (file)
@@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port)
        if (port->inline_data_size < 0)
                port->inline_data_size = 0;
 
+       /*
+        * If the transport didn't set the max_queue_size properly, then clamp
+        * it to the target limits. Also set default values in case the
+        * transport didn't set it at all.
+        */
+       if (port->max_queue_size < 0)
+               port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+       else
+               port->max_queue_size = clamp_t(int, port->max_queue_size,
+                                              NVMET_MIN_QUEUE_SIZE,
+                                              NVMET_MAX_QUEUE_SIZE);
+
        port->enabled = true;
        port->tr_ops = ops;
        return 0;
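
To make the clamp semantics above concrete, here is a minimal
stand-alone mirror of that logic (effective_queue_size() is an
illustrative helper, not a kernel function):

#include <stdio.h>

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024

/* Mirrors the default-and-clamp applied in nvmet_enable_port(). */
static int effective_queue_size(int configured)
{
	if (configured < 0)			/* transport left it unset */
		return NVMET_MAX_QUEUE_SIZE;
	if (configured < NVMET_MIN_QUEUE_SIZE)
		return NVMET_MIN_QUEUE_SIZE;
	if (configured > NVMET_MAX_QUEUE_SIZE)
		return NVMET_MAX_QUEUE_SIZE;
	return configured;
}

int main(void)
{
	printf("%d %d %d\n",
	       effective_queue_size(-1),	/* 1024 */
	       effective_queue_size(8),		/* 16 */
	       effective_queue_size(4096));	/* 1024 */
	return 0;
}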
@@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
        ctrl->cap |= (15ULL << 24);
        /* maximum queue entries supported: */
        if (ctrl->ops->get_max_queue_size)
-               ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+               ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+                                  ctrl->port->max_queue_size) - 1;
        else
-               ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+               ctrl->cap |= ctrl->port->max_queue_size - 1;
 
        if (nvmet_is_passthru_subsys(ctrl->subsys))
                nvmet_passthrough_override_cap(ctrl);
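
Note that CAP.MQES is a zero-based field, which is why 1 is subtracted
above; the host adds it back when reading CAP. A small stand-alone
sketch, mirroring the NVME_CAP_MQES() macro from include/linux/nvme.h:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel's NVME_CAP_MQES(): MQES occupies CAP[15:0]. */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)

int main(void)
{
	/* With port->max_queue_size = 1024, the target programs MQES = 1023. */
	uint64_t cap = 1024 - 1;

	printf("MQES=%llu -> queue size %llu\n",
	       (unsigned long long)NVME_CAP_MQES(cap),
	       (unsigned long long)(NVME_CAP_MQES(cap) + 1));
	return 0;
}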
index 144aca2fa6ad65b6a0072c8012f8b42fae50497d..7c6e7e65b0329662b9ecf2fcf603d974f468c9c7 100644 (file)
@@ -163,6 +163,7 @@ struct nvmet_port {
        void                            *priv;
        bool                            enabled;
        int                             inline_data_size;
+       int                             max_queue_size;
        const struct nvmet_fabrics_ops  *tr_ops;
        bool                            pi_enable;
 };
@@ -543,7 +544,8 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
                u8 event_info, u8 log_page);
 
-#define NVMET_QUEUE_SIZE       1024
+#define NVMET_MIN_QUEUE_SIZE   16
+#define NVMET_MAX_QUEUE_SIZE   1024
 #define NVMET_NR_QUEUES                128
 #define NVMET_MAX_CMD(ctrl)    (NVME_CAP_MQES(ctrl->cap) + 1)