[PATCH V2 1/2] nvmet-tcp: propagate nvmet_tcp_build_pdu_iovec() errors to its callers

Maurizio Lombardi mlombard at arkamax.eu
Fri Mar 13 00:45:11 PDT 2026


On Fri Mar 13, 2026 at 8:10 AM CET, Hannes Reinecke wrote:
> On 3/12/26 14:40, Maurizio Lombardi wrote:
>> Currently, when nvmet_tcp_build_pdu_iovec() detects an out-of-bounds
>> PDU length or offset, it triggers nvmet_tcp_fatal_error(cmd->queue)
>> and returns early. However, because the function returns void, the
>> callers are entirely unaware that a fatal error has occurred and
>> that the cmd->recv_msg.msg_iter was left uninitialized.
>> 
>> Callers such as nvmet_tcp_handle_h2c_data_pdu() proceed to blindly
>> overwrite the queue state with queue->rcv_state = NVMET_TCP_RECV_DATA.
>> Consequently, the socket receiving loop may attempt to read incoming
>> network data into the uninitialized iterator.
>> 
>> Fix this by shifting the error handling responsibility to the callers.
>> 
>> Fixes: 52a0a9854934 ("nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec")
>> Signed-off-by: Maurizio Lombardi <mlombard at redhat.com>
>> ---
>>   drivers/nvme/target/tcp.c | 51 ++++++++++++++++++++++-----------------
>>   1 file changed, 29 insertions(+), 22 deletions(-)
>> 
>> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
>> index acc71a26733f..1fbf12df1183 100644
>> --- a/drivers/nvme/target/tcp.c
>> +++ b/drivers/nvme/target/tcp.c
>> @@ -351,7 +351,7 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
>>   
>>   static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
>>   
>> -static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>> +static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>>   {
>>   	struct bio_vec *iov = cmd->iov;
>>   	struct scatterlist *sg;
>> @@ -364,22 +364,19 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>>   	offset = cmd->rbytes_done;
>>   	cmd->sg_idx = offset / PAGE_SIZE;
>>   	sg_offset = offset % PAGE_SIZE;
>> -	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
>> -		nvmet_tcp_fatal_error(cmd->queue);
>> -		return;
>> -	}
>> +	if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
>> +		return -EPROTO;
>> +
>>   	sg = &cmd->req.sg[cmd->sg_idx];
>>   	sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
>>   
>>   	while (length) {
>> -		if (!sg_remaining) {
>> -			nvmet_tcp_fatal_error(cmd->queue);
>> -			return;
>> -		}
>> -		if (!sg->length || sg->length <= sg_offset) {
>> -			nvmet_tcp_fatal_error(cmd->queue);
>> -			return;
>> -		}
>> +		if (!sg_remaining)
>> +			return -EPROTO;
>> +
>> +		if (!sg->length || sg->length <= sg_offset)
>> +			return -EPROTO;
>> +
>>   		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
>>   
>>   		bvec_set_page(iov, sg_page(sg), iov_len,
>> @@ -394,6 +391,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
>>   
>>   	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
>>   		      nr_pages, cmd->pdu_len);
>> +	return 0;
>>   }
>>   
>>   static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
>> @@ -931,7 +929,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
>>   	return 0;
>>   }
>>   
>> -static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
>> +static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
>>   		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
>>   {
>>   	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
>> @@ -947,19 +945,23 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
>>   	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
>>   	    data_len > cmd->req.port->inline_data_size) {
>>   		nvmet_prepare_receive_pdu(queue);
>> -		return;
>> +		return 0;
>>   	}
>>   
>>   	ret = nvmet_tcp_map_data(cmd);
>>   	if (unlikely(ret)) {
>>   		pr_err("queue %d: failed to map data\n", queue->idx);
>>   		nvmet_tcp_fatal_error(queue);
>> -		return;
>> +		return -EPROTO;
>>   	}
>>   
>>   	queue->rcv_state = NVMET_TCP_RECV_DATA;
>> -	nvmet_tcp_build_pdu_iovec(cmd);
>>   	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
>> +	ret = nvmet_tcp_build_pdu_iovec(cmd);
>> +	if (unlikely(ret))
>> +		pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
>
> Why don't we call 'nvmet_tcp_fatal_error()' here?
> The original code did ...

We don't need to; the error code is propagated up to
nvmet_tcp_done_recv_pdu, then up to nvmet_tcp_try_recv_pdu()
and then up to nvmet_tcp_try_recv_one(). Finally, it reaches
nvmet_tcp_try_recv(), which checks the error code and calls
nvmet_tcp_socket_error(); because the error code is -EPROTO,
nvmet_tcp_fatal_error() will be called.


Maurizio




More information about the Linux-nvme mailing list