[PATCH net-next v2 10/17] nvme: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage

Willem de Bruijn willemdebruijn.kernel at gmail.com
Sun Jun 18 09:47:56 PDT 2023


David Howells wrote:
> When transmitting data, call down into TCP using a single sendmsg with
> MSG_SPLICE_PAGES to indicate that content should be spliced rather than
> performing several sendmsg and sendpage calls to transmit header, data
> pages and trailer.
> 
> Signed-off-by: David Howells <dhowells at redhat.com>
> cc: Keith Busch <kbusch at kernel.org>
> cc: Jens Axboe <axboe at fb.com>
> cc: Christoph Hellwig <hch at lst.de>
> cc: Sagi Grimberg <sagi at grimberg.me>
> cc: Chaitanya Kulkarni <kch at nvidia.com>
> cc: "David S. Miller" <davem at davemloft.net>
> cc: Eric Dumazet <edumazet at google.com>
> cc: Jakub Kicinski <kuba at kernel.org>
> cc: Paolo Abeni <pabeni at redhat.com>
> cc: Jens Axboe <axboe at kernel.dk>
> cc: Matthew Wilcox <willy at infradead.org>
> cc: linux-nvme at lists.infradead.org
> cc: netdev at vger.kernel.org
> ---
> 
> Notes:
>     ver #2)
>      - Wrap lines at 80.
> 
>  drivers/nvme/host/tcp.c   | 46 ++++++++++++++++++++-------------------
>  drivers/nvme/target/tcp.c | 46 ++++++++++++++++++++++++---------------
>  2 files changed, 53 insertions(+), 39 deletions(-)
> 
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index bf0230442d57..6f31cdbb696a 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -997,25 +997,25 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
>  	u32 h2cdata_left = req->h2cdata_left;
>  
>  	while (true) {
> +		struct bio_vec bvec;
> +		struct msghdr msg = {
> +			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
> +		};
>  		struct page *page = nvme_tcp_req_cur_page(req);
>  		size_t offset = nvme_tcp_req_cur_offset(req);
>  		size_t len = nvme_tcp_req_cur_length(req);
>  		bool last = nvme_tcp_pdu_last_send(req, len);
>  		int req_data_sent = req->data_sent;
> -		int ret, flags = MSG_DONTWAIT;
> +		int ret;
>  
>  		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
> -			flags |= MSG_EOR;
> +			msg.msg_flags |= MSG_EOR;
>  		else
> -			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
> +			msg.msg_flags |= MSG_MORE;
>  
> -		if (sendpage_ok(page)) {
> -			ret = kernel_sendpage(queue->sock, page, offset, len,
> -					flags);
> -		} else {
> -			ret = sock_no_sendpage(queue->sock, page, offset, len,
> -					flags);
> -		}
> +		bvec_set_page(&bvec, page, len, offset);
> +		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
> +		ret = sock_sendmsg(queue->sock, &msg);
>  		if (ret <= 0)
>  			return ret;
>  
> @@ -1054,22 +1054,24 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>  {
>  	struct nvme_tcp_queue *queue = req->queue;
>  	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
> +	struct bio_vec bvec;
> +	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
>  	bool inline_data = nvme_tcp_has_inline_data(req);
>  	u8 hdgst = nvme_tcp_hdgst_len(queue);
>  	int len = sizeof(*pdu) + hdgst - req->offset;
> -	int flags = MSG_DONTWAIT;
>  	int ret;
>  
>  	if (inline_data || nvme_tcp_queue_more(queue))
> -		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
> +		msg.msg_flags |= MSG_MORE;
>  	else
> -		flags |= MSG_EOR;
> +		msg.msg_flags |= MSG_EOR;
>  
>  	if (queue->hdr_digest && !req->offset)
>  		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
>  
> -	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
> -			offset_in_page(pdu) + req->offset, len,  flags);
> +	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
> +	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
> +	ret = sock_sendmsg(queue->sock, &msg);
>  	if (unlikely(ret <= 0))
>  		return ret;
>

    struct bio_vec bvec;
    struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | ... };

    ..

    bvec_set_virt
    iov_iter_bvec
    sock_sendmsg

is a frequent pattern. Does it make sense to define a wrapper? The same goes
for the bvec_set_page variant.
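For illustration only, a minimal sketch of what such wrappers could look like
(the names kernel_sendmsg_splice_page/_virt are hypothetical and not part of
any existing API; placement and naming would be up to the series author):

	/* Assumes <linux/bvec.h>, <linux/net.h>, <linux/socket.h>, <linux/uio.h>. */

	/* Splice one page range into a socket via MSG_SPLICE_PAGES. */
	static inline int kernel_sendmsg_splice_page(struct socket *sock,
						     struct page *page,
						     size_t offset, size_t len,
						     unsigned int msg_flags)
	{
		struct bio_vec bvec;
		struct msghdr msg = {
			.msg_flags = MSG_SPLICE_PAGES | msg_flags,
		};

		bvec_set_page(&bvec, page, len, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
		return sock_sendmsg(sock, &msg);
	}

	/* Same, but for a kernel-virtual buffer (e.g. an inline PDU). */
	static inline int kernel_sendmsg_splice_virt(struct socket *sock,
						     void *buf, size_t len,
						     unsigned int msg_flags)
	{
		struct bio_vec bvec;
		struct msghdr msg = {
			.msg_flags = MSG_SPLICE_PAGES | msg_flags,
		};

		bvec_set_virt(&bvec, buf, len);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
		return sock_sendmsg(sock, &msg);
	}

With something along those lines, the cmd PDU send above would collapse to a
single call, e.g.:

	ret = kernel_sendmsg_splice_virt(queue->sock,
					 (void *)pdu + req->offset, len,
					 MSG_DONTWAIT | msg_flags);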


