[PATCH v2] nvmet-tcp: fix a segmentation fault during io parsing error
Hou Pu
houpu.main at gmail.com
Sun Mar 28 05:04:27 BST 2021
On Fri, 26 Mar 2021 20:59:41 +0300, Elad Grupi wrote:
> In case there is an io that contains inline data and it goes to
> parsing error flow, command response will free command and iov
> before clearing the data on the socket buffer.
> This will delay the command response until receive flow is completed.
>
> Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
> Signed-off-by: Elad Grupi <elad.grupi at dell.com>
> ---
> drivers/nvme/target/tcp.c | 29 +++++++++++++++++++++++------
> 1 file changed, 23 insertions(+), 6 deletions(-)
Hi Elad,
I just noticed that in this version we still queue the failed request to
the response queue. Am I missing something, or is the following hunk in
nvmet_tcp_queue_response() still needed? (I believe it is, to defer the
failure until the inline data has been consumed.)
@@ -526,6 +527,12 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ if (unlikely((cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ nvmet_tcp_has_inline_data(cmd))) {
+ /* fail the cmd when we finish processing the inline data */
+ return;
+ }
+
Thanks,
Hou
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index 70cc507d1565..3bc0caa47873 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -702,6 +702,17 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
> return 0;
> }
>
> + if (unlikely((cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
> + nvmet_tcp_has_data_in(cmd) &&
> + nvmet_tcp_has_inline_data(cmd))) {
> + /*
> + * wait for inline data before processing the response
> + * so the iov will not be freed
> + */
> + queue->snd_cmd = NULL;
> + goto done_send;
> + }
> +
> if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
> ret = nvmet_try_send_data_pdu(cmd);
> if (ret <= 0)
> @@ -1103,9 +1114,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
> return 0;
> }
>
> - if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
> - cmd->rbytes_done == cmd->req.transfer_len) {
> - cmd->req.execute(&cmd->req);
> + if (cmd->rbytes_done == cmd->req.transfer_len) {
> + if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
> + nvmet_tcp_queue_response(&cmd->req);
> + else
> + cmd->req.execute(&cmd->req);
> }
>
> nvmet_prepare_receive_pdu(queue);
> @@ -1143,9 +1156,13 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
> goto out;
> }
>
> - if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
> - cmd->rbytes_done == cmd->req.transfer_len)
> - cmd->req.execute(&cmd->req);
> + if (cmd->rbytes_done == cmd->req.transfer_len) {
> + if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
> + nvmet_tcp_queue_response(&cmd->req);
> + else
> + cmd->req.execute(&cmd->req);
> + }
> +
> ret = 0;
> out:
> nvmet_prepare_receive_pdu(queue);
More information about the Linux-nvme
mailing list