[RFC PATCH 4/5 COMPILE TESTED] nvme-fc: reduce blk_rq_nr_phys_segments calls

Chaitanya Kulkarni chaitanya.kulkarni at wdc.com
Mon Jul 6 19:15:23 EDT 2020


In the fast path, blk_rq_nr_phys_segments() is called multiple times:
once in nvme_fc_queue_rq() and three times in nvme_fc_map_data() for the
FC fabric. blk_rq_nr_phys_segments() performs an if check for a special
payload, so the same check is repeated every time the function is called
in the fast path.
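
For reference, blk_rq_nr_phys_segments() in include/linux/blkdev.h is
roughly the following (quoted here for context only, not part of this
patch):

    static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
    {
            if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                    return 1;
            return rq->nr_phys_segments;
    }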

To minimize this repetitive check in the fast path, this patch reduces
the number of calls to one, caches the result in the submission path,
and passes it down to the callees that need it.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
---
 drivers/nvme/host/fc.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e999a8c4b7e8..7f627890e3de 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2462,25 +2462,24 @@ nvme_fc_timeout(struct request *rq, bool reserved)
 
 static int
 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
-		struct nvme_fc_fcp_op *op)
+		struct nvme_fc_fcp_op *op, unsigned short nseg)
 {
 	struct nvmefc_fcp_req *freq = &op->fcp_req;
 	int ret;
 
 	freq->sg_cnt = 0;
 
-	if (!blk_rq_nr_phys_segments(rq))
+	if (!nseg)
 		return 0;
 
 	freq->sg_table.sgl = freq->first_sgl;
-	ret = sg_alloc_table_chained(&freq->sg_table,
-			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
+	ret = sg_alloc_table_chained(&freq->sg_table, nseg, freq->sg_table.sgl,
 			NVME_INLINE_SG_CNT);
 	if (ret)
 		return -ENOMEM;
 
 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
+	WARN_ON(op->nents > nseg);
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, rq_dma_dir(rq));
 	if (unlikely(freq->sg_cnt <= 0)) {
@@ -2538,7 +2537,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	struct nvme_fc_fcp_op *op, u32 data_len,
-	enum nvmefc_fcp_datadir	io_dir)
+	enum nvmefc_fcp_datadir	io_dir, unsigned short nseg)
 {
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
@@ -2595,7 +2594,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	sqe->rw.dptr.sgl.addr = 0;
 
 	if (!(op->flags & FCOP_FLAGS_AEN)) {
-		ret = nvme_fc_map_data(ctrl, op->rq, op);
+		ret = nvme_fc_map_data(ctrl, op->rq, op, nseg);
 		if (ret < 0) {
 			nvme_cleanup_cmd(op->rq);
 			nvme_fc_ctrl_put(ctrl);
@@ -2659,6 +2658,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_fc_queue *queue = hctx->driver_data;
 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
 	struct request *rq = bd->rq;
+	unsigned short nseg = blk_rq_nr_phys_segments(rq);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
@@ -2683,7 +2683,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * more physical segments in the sg list. If there is no
 	 * physical segments, there is no payload.
 	 */
-	if (blk_rq_nr_phys_segments(rq)) {
+	if (nseg) {
 		data_len = blk_rq_payload_bytes(rq);
 		io_dir = ((rq_data_dir(rq) == WRITE) ?
 					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
@@ -2693,7 +2693,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 
-	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
+	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir, nseg);
 }
 
 static void
@@ -2709,7 +2709,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 	aen_op = &ctrl->aen_ops[0];
 
 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
-					NVMEFC_FCP_NODATA);
+					NVMEFC_FCP_NODATA, 0);
 	if (ret)
 		dev_err(ctrl->ctrl.device,
 			"failed async event work\n");
-- 
2.22.0