[PATCH] nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec
Maurizio Lombardi
mlombard at arkamax.eu
Tue Mar 3 02:59:32 PST 2026
On Wed Feb 25, 2026 at 9:06 PM CET, Guenter Roeck wrote:
> An experimental AI agent provided the following review feedback.
>
> If we return early here (or in the other bounds checks added below),
> cmd->recv_msg.msg_iter is left uninitialized because we skip the
> iov_iter_bvec() call at the end of the function.
>
> Since nvmet_tcp_build_pdu_iovec() returns void, callers are unaware
> of the failure. For example, in nvmet_tcp_handle_h2c_data_pdu():
>
> nvmet_tcp_build_pdu_iovec(cmd);
> queue->cmd = cmd;
> queue->rcv_state = NVMET_TCP_RECV_DATA;
>
> Even though nvmet_tcp_fatal_error() correctly sets rcv_state to
> NVMET_TCP_RECV_ERR, the caller immediately overwrites it with
> NVMET_TCP_RECV_DATA.
>
> Does this cause the state machine to proceed and attempt to receive
> data using the uninitialized cmd->recv_msg.msg_iter? Should this
> function return an error code so callers can handle the failure?
In my opinion this is a valid concern.
I suspect that it's really possible to create a malicious sequence
that would make the target crash.
I would change nvmet_tcp_build_pdu_iovec() to return an error
code, and update its callers so they propagate it up to nvmet_tcp_done_recv_pdu().
Something like this:
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index acc71a26733f..3d97257075d3 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -351,7 +351,7 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
-static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
@@ -364,22 +364,19 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
- if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
- nvmet_tcp_fatal_error(cmd->queue);
- return;
- }
+ if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
+ return -EPROTO;
+
sg = &cmd->req.sg[cmd->sg_idx];
sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
while (length) {
- if (!sg_remaining) {
- nvmet_tcp_fatal_error(cmd->queue);
- return;
- }
- if (!sg->length || sg->length <= sg_offset) {
- nvmet_tcp_fatal_error(cmd->queue);
- return;
- }
+ if (!sg_remaining)
+ return -EPROTO;
+
+ if (!sg->length || sg->length <= sg_offset)
+ return -EPROTO;
+
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
bvec_set_page(iov, sg_page(sg), iov_len,
@@ -394,6 +391,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
nr_pages, cmd->pdu_len);
+ return 0;
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
@@ -931,7 +929,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
return 0;
}
-static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
@@ -947,19 +945,19 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
if (!nvme_is_write(cmd->req.cmd) || !data_len ||
data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
- return;
+ return 0;
}
ret = nvmet_tcp_map_data(cmd);
if (unlikely(ret)) {
pr_err("queue %d: failed to map data\n", queue->idx);
nvmet_tcp_fatal_error(queue);
- return;
+ return ret;
}
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_build_pdu_iovec(cmd);
cmd->flags |= NVMET_TCP_F_INIT_FAILED;
+ return nvmet_tcp_build_pdu_iovec(cmd);
}
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
@@ -1011,7 +1009,8 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
goto err_proto;
}
cmd->pdu_recv = 0;
- nvmet_tcp_build_pdu_iovec(cmd);
+ if (nvmet_tcp_build_pdu_iovec(cmd))
+ goto err_proto;
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
@@ -1074,8 +1073,10 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
le32_to_cpu(req->cmd->common.dptr.sgl.length),
le16_to_cpu(req->cqe->status));
- nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
- return 0;
+ ret = nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
+ if (unlikely(ret))
+ nvmet_tcp_fatal_error(queue);
+ return ret;
}
ret = nvmet_tcp_map_data(queue->cmd);
@@ -1092,8 +1093,10 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (nvmet_tcp_need_data_in(queue->cmd)) {
if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_build_pdu_iovec(queue->cmd);
- return 0;
+ ret = nvmet_tcp_build_pdu_iovec(queue->cmd);
+ if (unlikely(ret))
+ nvmet_tcp_fatal_error(queue);
+ return ret;
}
/* send back R2T */
nvmet_tcp_queue_response(&queue->cmd->req);
Maurizio
More information about the Linux-nvme
mailing list