[PATCH 1/3] nvme-tcp: avoid inline sending when handling R2T PDUs

Chris Leech cleech at redhat.com
Tue Mar 11 11:40:51 PDT 2025


On Fri, Mar 07, 2025 at 02:28:00PM +0100, Hannes Reinecke wrote:
> When handling an R2T PDU we should not attempt to send consecutive
> PDUs as we are running from a softirq context, and sending PDUs
> from the receive context will mess up latencies.
> So just queue it and let the io_work workqueue function do the work.
> 
> Signed-off-by: Hannes Reinecke <hare at kernel.org>
> ---
>  drivers/nvme/host/tcp.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
> 

Am I missing something, or does this patch not actually change anything?
With sync=false, nvme_tcp_queue_request will already queue io_work, so
what does removing that flag and open-coding the same steps in
nvme_tcp_handle_r2t accomplish?
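
For reference, this is the tail of nvme_tcp_queue_request as I read it
before this patch (paraphrased from current upstream; comments mine):

	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		/* inline send path, only reachable when sync == true */
		nvme_tcp_send_all(queue);
		mutex_unlock(&queue->send_mutex);
	} else if (last) {
		/* sync == false falls through to here and just
		 * schedules io_work on the queue's io_cpu
		 */
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}

The function starts by doing llist_add(&req->lentry, &queue->req_list),
so nvme_tcp_queue_request(req, false, true) already ends up as
llist_add() + queue_work_on(), which looks identical to the new
open-coded sequence in nvme_tcp_handle_r2t.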

- Chris

> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 8c14018201db..034edf852878 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -391,7 +391,7 @@ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
>  }
>  
>  static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
> -		bool sync, bool last)
> +		bool last)
>  {
>  	struct nvme_tcp_queue *queue = req->queue;
>  	bool empty;
> @@ -405,7 +405,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
>  	 * are on the same cpu, so we don't introduce contention.
>  	 */
>  	if (queue->io_cpu == raw_smp_processor_id() &&
> -	    sync && empty && mutex_trylock(&queue->send_mutex)) {
> +	    empty && mutex_trylock(&queue->send_mutex)) {
>  		nvme_tcp_send_all(queue);
>  		mutex_unlock(&queue->send_mutex);
>  	}
> @@ -758,7 +758,9 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
>  	req->ttag = pdu->ttag;
>  
>  	nvme_tcp_setup_h2c_data_pdu(req);
> -	nvme_tcp_queue_request(req, false, true);
> +
> +	llist_add(&req->lentry, &queue->req_list);
> +	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
>  
>  	return 0;
>  }
> @@ -2531,7 +2533,7 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
>  	ctrl->async_req.curr_bio = NULL;
>  	ctrl->async_req.data_len = 0;
>  
> -	nvme_tcp_queue_request(&ctrl->async_req, true, true);
> +	nvme_tcp_queue_request(&ctrl->async_req, true);
>  }
>  
>  static void nvme_tcp_complete_timed_out(struct request *rq)
> @@ -2683,7 +2685,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
>  
>  	nvme_start_request(rq);
>  
> -	nvme_tcp_queue_request(req, true, bd->last);
> +	nvme_tcp_queue_request(req, bd->last);
>  
>  	return BLK_STS_OK;
>  }
> -- 
> 2.35.3
> 
> 
