[PATCH] nvme/tcp: use in-capsule data for fabrics commands

Caleb Sander csander at purestorage.com
Thu Jun 16 07:49:15 PDT 2022


From the NVMe/TCP spec:
> The maximum amount of in-capsule data for Fabrics and Admin Commands
> is 8,192 bytes ... NVMe/TCP controllers must support in-capsule data
> for Fabrics and Admin Command Capsules

Currently, command data is sent in-capsule only on the admin queue or
on I/O queues that indicate support for it.
Send fabrics command data in-capsule on I/O queues as well, avoiding
the need for a separate H2CData PDU.

Signed-off-by: Caleb Sander <csander at purestorage.com>
---
 drivers/nvme/host/tcp.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
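
Not part of the commit message, just a note: below is a minimal
user-space sketch of the size math behind this change. It assumes the
usual kernel definitions -- NVME_TCP_ADMIN_CCSZ is 8 KiB, struct
nvme_command is 64 bytes, and struct nvmf_connect_data (the payload of
a fabrics Connect command) is 1024 bytes -- hard-coded here for
illustration. sketch_queue, inline_data_size() and the SKETCH_*
constants are made up; they only mirror the patched
nvme_tcp_inline_data_size() logic on an I/O queue whose controller
advertised no in-capsule data support:

    /*
     * Standalone illustration (user space, not kernel code) of the
     * in-capsule decision after this patch.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define SKETCH_NVME_CMD_SIZE     64    /* sizeof(struct nvme_command) */
    #define SKETCH_ADMIN_CCSZ        8192  /* NVME_TCP_ADMIN_CCSZ (8 KiB) */
    #define SKETCH_CONNECT_DATA_SIZE 1024  /* sizeof(struct nvmf_connect_data) */

    /* Stand-in for queue->cmnd_capsule_len on a queue with no ICD support. */
    struct sketch_queue {
            size_t cmnd_capsule_len;
    };

    /* Mirrors nvme_tcp_inline_data_size() with the patch applied. */
    static size_t inline_data_size(const struct sketch_queue *q, bool is_fabrics)
    {
            if (is_fabrics)
                    return SKETCH_ADMIN_CCSZ;
            return q->cmnd_capsule_len - SKETCH_NVME_CMD_SIZE;
    }

    int main(void)
    {
            /* I/O queue: capsule holds only the 64-byte SQE, no data. */
            struct sketch_queue ioq = { .cmnd_capsule_len = SKETCH_NVME_CMD_SIZE };
            size_t old_limit = ioq.cmnd_capsule_len - SKETCH_NVME_CMD_SIZE;
            size_t new_limit = inline_data_size(&ioq, true);

            printf("Connect data inline before patch: %s\n",
                   SKETCH_CONNECT_DATA_SIZE <= old_limit ?
                   "yes" : "no (R2T + H2CData PDU needed)");
            printf("Connect data inline after patch:  %s\n",
                   SKETCH_CONNECT_DATA_SIZE <= new_limit ?
                   "yes" : "no (R2T + H2CData PDU needed)");
            return 0;
    }

On such a queue the old limit is 0 bytes, so the 1024-byte Connect
payload always had to wait for an R2T and go out in an H2CData PDU;
with the patch it fits well under the 8,192-byte fabrics/admin limit
quoted above and is sent in the command capsule itself.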

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bb67538d2..f1869ab3c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -207,12 +207,15 @@ static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
 {
 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }
 
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue,
+		struct nvme_tcp_request *req)
 {
+	if (nvme_is_fabrics(req->req.cmd))
+		return NVME_TCP_ADMIN_CCSZ;
 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
 }
 
 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 {
@@ -227,11 +230,11 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 		return false; /* async events don't have a request */
 
 	rq = blk_mq_rq_from_pdu(req);
 
 	return rq_data_dir(rq) == WRITE && req->data_len &&
-		req->data_len <= nvme_tcp_inline_data_size(req->queue);
+		req->data_len <= nvme_tcp_inline_data_size(req->queue, req);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
 {
 	return req->iter.bvec->bv_page;
@@ -2368,11 +2371,11 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
 	if (!blk_rq_nr_phys_segments(rq))
 		nvme_tcp_set_sg_null(c);
 	else if (rq_data_dir(rq) == WRITE &&
-	    req->data_len <= nvme_tcp_inline_data_size(queue))
+	    req->data_len <= nvme_tcp_inline_data_size(queue, req))
 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
 	else
 		nvme_tcp_set_sg_host_data(c, req->data_len);
 
 	return 0;
@@ -2403,11 +2406,11 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->curr_bio = rq->bio;
 	if (req->curr_bio && req->data_len)
 		nvme_tcp_init_iter(req, rq_data_dir(rq));
 
 	if (rq_data_dir(rq) == WRITE &&
-	    req->data_len <= nvme_tcp_inline_data_size(queue))
+	    req->data_len <= nvme_tcp_inline_data_size(queue, req))
 		req->pdu_len = req->data_len;
 
 	pdu->hdr.type = nvme_tcp_cmd;
 	pdu->hdr.flags = 0;
 	if (queue->hdr_digest)
-- 
2.25.1



