nvmet-tcp: fix a segmentation fault during io parsing error
author		Elad Grupi <elad.grupi@dell.com>
		Wed, 31 Mar 2021 09:13:14 +0000 (17:13 +0800)
committer	Christoph Hellwig <hch@lst.de>
		Thu, 15 Apr 2021 06:12:50 +0000 (08:12 +0200)
If an io contains inline data and goes down the parsing error flow,
the command response will free the command and iov before the inline
data has been cleared from the socket buffer.
Fix this by delaying the command response until the receive flow is
completed.
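
The core of the fix is a deferral: if the response is being queued for
the command still on the wire (cmd == queue->cmd) and the PDU
advertises in-capsule data, nvmet_tcp_queue_response() returns without
queueing anything, and the receive path re-queues the response via the
new nvmet_tcp_execute_request() helper once rbytes_done reaches
transfer_len. A minimal user-space model of that pattern follows; all
names in it are illustrative, not the nvmet-tcp driver's API:

	/*
	 * Simplified model of the deferred-response pattern: a command
	 * that failed parsing but has pending inline data is not
	 * completed until the receive path has consumed that data.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct cmd {
		bool init_failed;	/* models NVMET_TCP_F_INIT_FAILED */
		int  inline_len;	/* in-capsule data advertised by the PDU */
		int  rbytes_done;	/* inline bytes drained from the socket */
		bool responded;
	};

	/* models nvmet_tcp_queue_response(): defer while data is pending */
	static void queue_response(struct cmd *c)
	{
		if (c->rbytes_done < c->inline_len)
			return;		/* receive flow will re-queue us later */
		c->responded = true;
		printf("response sent (rbytes_done=%d)\n", c->rbytes_done);
	}

	/* models nvmet_tcp_try_recv_data() + nvmet_tcp_execute_request() */
	static void recv_data(struct cmd *c, int nbytes)
	{
		c->rbytes_done += nbytes;
		if (c->rbytes_done != c->inline_len)
			return;
		if (c->init_failed)
			queue_response(c);	/* failed cmd: respond, don't execute */
		else
			printf("execute request\n");
	}

	int main(void)
	{
		struct cmd c = { .init_failed = true, .inline_len = 512 };

		queue_response(&c);	/* parsing error path: response deferred */
		recv_data(&c, 256);	/* socket still holds inline data */
		recv_data(&c, 256);	/* last chunk drained: respond now */
		return 0;
	}

The point of the model: the response for a failed command is never
emitted while inline bytes are still queued on the socket, so the
command and iov backing those bytes stay alive until the receive flow
has drained them.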

Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
Signed-off-by: Elad Grupi <elad.grupi@dell.com>
Signed-off-by: Hou Pu <houpu.main@gmail.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/tcp.c

index 558a973277fd7063c486bf4884ad55297a2cb3f8..e14235811ba18b71e41c55c37c16ff84939038b8 100644
@@ -537,11 +537,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
        struct nvmet_tcp_cmd *cmd =
                container_of(req, struct nvmet_tcp_cmd, req);
        struct nvmet_tcp_queue  *queue = cmd->queue;
+       struct nvme_sgl_desc *sgl;
+       u32 len;
+
+       if (unlikely(cmd == queue->cmd)) {
+               sgl = &cmd->req.cmd->common.dptr.sgl;
+               len = le32_to_cpu(sgl->length);
+
+               /*
+                * Wait for inline data before processing the response.
+                * Avoid using helpers, this might happen before
+                * nvmet_req_init is completed.
+                */
+               if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+                   len && len < cmd->req.port->inline_data_size &&
+                   nvme_is_write(cmd->req.cmd))
+                       return;
+       }
 
        llist_add(&cmd->lentry, &queue->resp_list);
        queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
 }
 
+static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
+{
+       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+               nvmet_tcp_queue_response(&cmd->req);
+       else
+               cmd->req.execute(&cmd->req);
+}
+
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
        u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
@@ -973,7 +998,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
                        le32_to_cpu(req->cmd->common.dptr.sgl.length));
 
                nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
-               return -EAGAIN;
+               return 0;
        }
 
        ret = nvmet_tcp_map_data(queue->cmd);
@@ -1116,10 +1141,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
        }
        nvmet_tcp_unmap_pdu_iovec(cmd);
 
-       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-           cmd->rbytes_done == cmd->req.transfer_len) {
-               cmd->req.execute(&cmd->req);
-       }
+       if (cmd->rbytes_done == cmd->req.transfer_len)
+               nvmet_tcp_execute_request(cmd);
 
        nvmet_prepare_receive_pdu(queue);
        return 0;
@@ -1156,9 +1179,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
                goto out;
        }
 
-       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-           cmd->rbytes_done == cmd->req.transfer_len)
-               cmd->req.execute(&cmd->req);
+       if (cmd->rbytes_done == cmd->req.transfer_len)
+               nvmet_tcp_execute_request(cmd);
+
        ret = 0;
 out:
        nvmet_prepare_receive_pdu(queue);
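
The return-code change in nvmet_tcp_done_recv_pdu() is the other half
of the fix: returning -EAGAIN on a parse failure stopped the receive
state machine with the in-capsule payload still sitting in the socket
buffer, while returning 0 lets it advance and drain that payload. A
rough model of why the return value matters (loop structure heavily
simplified, names hypothetical):

	/*
	 * A negative error aborts the receive loop with the inline
	 * payload still queued on the socket; 0 lets the state machine
	 * advance to receiving (and discarding) that payload.
	 */
	#include <stdio.h>

	enum rcv_state { RECV_PDU, RECV_DATA, RECV_DONE };

	static int done_recv_pdu(enum rcv_state *state, int parse_failed)
	{
		if (parse_failed) {
			/* handle_req_failure(): map a throwaway iov, keep going */
			*state = RECV_DATA;
			return 0;	/* was -EAGAIN before the fix */
		}
		*state = RECV_DATA;	/* normal path: receive the real data */
		return 0;
	}

	static int try_recv_one(enum rcv_state *state)
	{
		int ret = done_recv_pdu(state, /*parse_failed=*/1);

		if (ret < 0)
			return ret;	/* old behavior: bail out, data undrained */
		if (*state == RECV_DATA) {
			printf("draining inline data\n");
			*state = RECV_DONE;	/* response can now be queued */
		}
		return 1;
	}

	int main(void)
	{
		enum rcv_state state = RECV_PDU;

		while (state != RECV_DONE && try_recv_one(&state) > 0)
			;
		return 0;
	}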