[PATCH V3 1/2] nvmet-tcp: propagate nvmet_tcp_build_pdu_iovec() errors to its callers

yunje shin yjshin0438 at gmail.com
Mon Mar 16 21:33:04 PDT 2026


Looks good to me.

Reviewed-by: Yunje Shin <ioerts at kookmin.ac.kr>

On Mon, Mar 16, 2026 at 11:39 PM Maurizio Lombardi <mlombard at redhat.com> wrote:
>
> Currently, when nvmet_tcp_build_pdu_iovec() detects an out-of-bounds
> PDU length or offset, it triggers nvmet_tcp_fatal_error(cmd->queue)
> and returns early. However, because the function returns void, the
> callers are entirely unaware that a fatal error has occurred and
> that the cmd->recv_msg.msg_iter was left uninitialized.
>
> Callers such as nvmet_tcp_handle_h2c_data_pdu() proceed to blindly
> overwrite the queue state with queue->rcv_state = NVMET_TCP_RECV_DATA.
> Consequently, the socket receiving loop may attempt to read incoming
> network data into the uninitialized iterator.
>
> Fix this by shifting the error handling responsibility to the callers.
>
> Fixes: 52a0a9854934 ("nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec")
> Reviewed-by: Hannes Reinecke <hare at suse.de>
> Signed-off-by: Maurizio Lombardi <mlombard at redhat.com>
> ---
>  drivers/nvme/target/tcp.c | 51 ++++++++++++++++++++++-----------------
>  1 file changed, 29 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index acc71a26733f..1fbf12df1183 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -351,7 +351,7 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
>
>  static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
>
> -static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
> +static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>  {
>         struct bio_vec *iov = cmd->iov;
>         struct scatterlist *sg;
> @@ -364,22 +364,19 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>         offset = cmd->rbytes_done;
>         cmd->sg_idx = offset / PAGE_SIZE;
>         sg_offset = offset % PAGE_SIZE;
> -       if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
> -               nvmet_tcp_fatal_error(cmd->queue);
> -               return;
> -       }
> +       if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
> +               return -EPROTO;
> +
>         sg = &cmd->req.sg[cmd->sg_idx];
>         sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
>
>         while (length) {
> -               if (!sg_remaining) {
> -                       nvmet_tcp_fatal_error(cmd->queue);
> -                       return;
> -               }
> -               if (!sg->length || sg->length <= sg_offset) {
> -                       nvmet_tcp_fatal_error(cmd->queue);
> -                       return;
> -               }
> +               if (!sg_remaining)
> +                       return -EPROTO;
> +
> +               if (!sg->length || sg->length <= sg_offset)
> +                       return -EPROTO;
> +
>                 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
>
>                 bvec_set_page(iov, sg_page(sg), iov_len,
> @@ -394,6 +391,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>
>         iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
>                       nr_pages, cmd->pdu_len);
> +       return 0;
>  }
>
>  static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
> @@ -931,7 +929,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
>         return 0;
>  }
>
> -static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
> +static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
>                 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
>  {
>         size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
> @@ -947,19 +945,23 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
>         if (!nvme_is_write(cmd->req.cmd) || !data_len ||
>             data_len > cmd->req.port->inline_data_size) {
>                 nvmet_prepare_receive_pdu(queue);
> -               return;
> +               return 0;
>         }
>
>         ret = nvmet_tcp_map_data(cmd);
>         if (unlikely(ret)) {
>                 pr_err("queue %d: failed to map data\n", queue->idx);
>                 nvmet_tcp_fatal_error(queue);
> -               return;
> +               return -EPROTO;
>         }
>
>         queue->rcv_state = NVMET_TCP_RECV_DATA;
> -       nvmet_tcp_build_pdu_iovec(cmd);
>         cmd->flags |= NVMET_TCP_F_INIT_FAILED;
> +       ret = nvmet_tcp_build_pdu_iovec(cmd);
> +       if (unlikely(ret))
> +               pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
> +
> +       return ret;
>  }
>
>  static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
> @@ -1011,7 +1013,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
>                 goto err_proto;
>         }
>         cmd->pdu_recv = 0;
> -       nvmet_tcp_build_pdu_iovec(cmd);
> +       if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) {
> +               pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
> +               goto err_proto;
> +       }
>         queue->cmd = cmd;
>         queue->rcv_state = NVMET_TCP_RECV_DATA;
>
> @@ -1074,8 +1079,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
>                         le32_to_cpu(req->cmd->common.dptr.sgl.length),
>                         le16_to_cpu(req->cqe->status));
>
> -               nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
> -               return 0;
> +               return nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
>         }
>
>         ret = nvmet_tcp_map_data(queue->cmd);
> @@ -1092,8 +1096,11 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
>         if (nvmet_tcp_need_data_in(queue->cmd)) {
>                 if (nvmet_tcp_has_inline_data(queue->cmd)) {
>                         queue->rcv_state = NVMET_TCP_RECV_DATA;
> -                       nvmet_tcp_build_pdu_iovec(queue->cmd);
> -                       return 0;
> +                       ret = nvmet_tcp_build_pdu_iovec(queue->cmd);
> +                       if (unlikely(ret))
> +                               pr_err("queue %d: failed to build PDU iovec\n",
> +                                       queue->idx);
> +                       return ret;
>                 }
>                 /* send back R2T */
>                 nvmet_tcp_queue_response(&queue->cmd->req);
> --
> 2.53.0
>



More information about the Linux-nvme mailing list