[RFC PATCH 3/5 COMPILE TESTED] nvme-tcp: reduce blk_rq_nr_phys_segments calls

Chaitanya Kulkarni chaitanya.kulkarni at wdc.com
Mon Jul 6 19:15:22 EDT 2020


In the fast path, blk_rq_nr_phys_segments() is called twice for the TCP
fabric. The function blk_rq_nr_phys_segments() performs an if check for
a special payload, and that check is repeated as many times as we call
the function in the fast path.

In order to minimize this repeated check in the fast path, this patch
reduces the number of calls to one and adjusts the code in the
submission path accordingly.
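
For reference, a sketch of the helper in question as it appears in
v5.8-era trees in include/linux/blkdev.h (quoted here for illustration;
see the tree for the authoritative definition):

	static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
	{
		/*
		 * Data-less commands such as discard may carry a
		 * driver-built special payload; report one segment so
		 * that payload still gets mapped.
		 */
		if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
			return 1;
		return rq->nr_phys_segments;
	}

Caching the result in a local variable evaluates this branch once per
command setup instead of once per call site.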

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
---
 drivers/nvme/host/tcp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 860d7ddc2eee..ca0f8f17ef29 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2173,7 +2173,7 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
 }
 
 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
-			struct request *rq)
+			struct request *rq, unsigned short nseg)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
@@ -2181,7 +2181,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
-	if (!blk_rq_nr_phys_segments(rq))
+	if (!nseg)
 		nvme_tcp_set_sg_null(c);
 	else if (rq_data_dir(rq) == WRITE &&
 	    req->data_len <= nvme_tcp_inline_data_size(queue))
@@ -2196,6 +2196,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 		struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+	unsigned short nseg = blk_rq_nr_phys_segments(rq);
 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 	struct nvme_tcp_queue *queue = req->queue;
 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
@@ -2210,8 +2211,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->data_sent = 0;
 	req->pdu_len = 0;
 	req->pdu_sent = 0;
-	req->data_len = blk_rq_nr_phys_segments(rq) ?
-				blk_rq_payload_bytes(rq) : 0;
+	req->data_len = nseg ? blk_rq_payload_bytes(rq) : 0;
 	req->curr_bio = rq->bio;
 
 	if (rq_data_dir(rq) == WRITE &&
@@ -2233,7 +2233,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	pdu->hdr.plen =
 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
 
-	ret = nvme_tcp_map_data(queue, rq);
+	ret = nvme_tcp_map_data(queue, rq, nseg);
 	if (unlikely(ret)) {
 		nvme_cleanup_cmd(rq);
 		dev_err(queue->ctrl->ctrl.device,
-- 
2.22.0



