[PATCH v2] nvme-tcp: use in-capsule data for I/O connect

Caleb Sander csander at purestorage.com
Thu Jul 7 14:12:45 PDT 2022


From the NVMe/TCP spec:
> The maximum amount of in-capsule data for Fabrics and Admin Commands
> is 8,192 bytes ... NVMe/TCP controllers must support in-capsule data
> for Fabrics and Admin Command Capsules

Currently, command data is sent in-capsule only on the admin queue or
on I/O queues that indicate support for it.
Send fabrics command data in-capsule on I/O queues too, to avoid
needing a separate H2CData PDU for the connect command.

Signed-off-by: Caleb Sander <csander at purestorage.com>
---
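Note (not for the commit message): with this change, whether the connect
command's data goes in-capsule on an I/O queue reduces to a size check
against the 8 KiB fabrics budget. A standalone sketch of that arithmetic,
not part of the patch; the 8192 figure corresponds to NVME_TCP_ADMIN_CCSZ,
and the 1024-byte connect data length assumes the usual
sizeof(struct nvmf_connect_data):

	#include <stdio.h>

	int main(void)
	{
		/* Fabrics in-capsule budget (NVME_TCP_ADMIN_CCSZ, 8 KiB) */
		const unsigned long fabrics_inline_budget = 8192;
		/* Connect command data length (struct nvmf_connect_data) */
		const unsigned long connect_data_len = 1024;

		/* Same comparison nvme_tcp_has_inline_data() ends up doing
		 * for a fabrics write after this patch */
		if (connect_data_len <= fabrics_inline_budget)
			printf("connect data sent in-capsule, no H2CData PDU\n");
		else
			printf("connect data needs a separate H2CData PDU\n");
		return 0;
	}
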
 drivers/nvme/host/tcp.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7a9e6ffa2342..307780d2787a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -207,13 +207,15 @@ static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
 {
 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }

-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
 {
-	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+	if (nvme_is_fabrics(req->req.cmd))
+		return NVME_TCP_ADMIN_CCSZ;
+	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
 }

 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 {
 	return req == &req->queue->ctrl->async_req;
@@ -227,11 +229,11 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 		return false; /* async events don't have a request */

 	rq = blk_mq_rq_from_pdu(req);

 	return rq_data_dir(rq) == WRITE && req->data_len &&
-		req->data_len <= nvme_tcp_inline_data_size(req->queue);
+		req->data_len <= nvme_tcp_inline_data_size(req);
 }

 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
 {
 	return req->iter.bvec->bv_page;
@@ -2370,11 +2372,11 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 	c->common.flags |= NVME_CMD_SGL_METABUF;

 	if (!blk_rq_nr_phys_segments(rq))
 		nvme_tcp_set_sg_null(c);
 	else if (rq_data_dir(rq) == WRITE &&
-	    req->data_len <= nvme_tcp_inline_data_size(queue))
+	    req->data_len <= nvme_tcp_inline_data_size(req))
 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
 	else
 		nvme_tcp_set_sg_host_data(c, req->data_len);

 	return 0;
@@ -2405,11 +2407,11 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->curr_bio = rq->bio;
 	if (req->curr_bio && req->data_len)
 		nvme_tcp_init_iter(req, rq_data_dir(rq));

 	if (rq_data_dir(rq) == WRITE &&
-	    req->data_len <= nvme_tcp_inline_data_size(queue))
+	    req->data_len <= nvme_tcp_inline_data_size(req))
 		req->pdu_len = req->data_len;

 	pdu->hdr.type = nvme_tcp_cmd;
 	pdu->hdr.flags = 0;
 	if (queue->hdr_digest)
--
2.25.1



