[PATCH 2/3] nvme: retry internal commands if DNR status bit is not set

Hannes Reinecke <hare@suse.de>
Wed Feb 8 00:49:38 PST 2023


Add a 'retry' argument to __nvme_alloc_rq() instructing the function
not to set the REQ_FAILFAST_DRIVER flag on the command. This allows
the command to be retried by nvme_decide_disposition() if the DNR
bit is not set in the command result.
Also modify the authentication code to allow for retries.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
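Note: the retry decision this patch relies on is made in
nvme_decide_disposition() on the completion path. A simplified sketch
of the relevant checks (omitting the multipath failover branch of the
real helper) looks like this:

	static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
	{
		if (likely(nvme_req(req)->status == 0))
			return COMPLETE;

		/*
		 * blk_noretry_request() tests the REQ_FAILFAST_* flags,
		 * including REQ_FAILFAST_DRIVER; leaving that flag unset
		 * in nvme_init_request() is what makes the command
		 * eligible for the RETRY disposition below.
		 */
		if (blk_noretry_request(req) ||
		    (nvme_req(req)->status & NVME_SC_DNR) ||
		    nvme_req(req)->retries >= nvme_max_retries)
			return COMPLETE;

		return RETRY;
	}
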
 drivers/nvme/host/auth.c       |  3 ++-
 drivers/nvme/host/core.c       | 18 ++++++++++--------
 drivers/nvme/host/fabrics.c    | 10 +++++-----
 drivers/nvme/host/ioctl.c      |  2 +-
 drivers/nvme/host/nvme.h       |  5 +++--
 drivers/nvme/host/pci.c        |  5 +++--
 drivers/nvme/target/passthru.c |  2 +-
 7 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index cbb6f1cb2046..50cd45e92998 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -77,7 +77,8 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 		cmd.auth_receive.al = cpu_to_le32(data_len);
 	}
 
-	req = __nvme_alloc_rq(q, &cmd, qid == 0 ? NVME_QID_ANY : qid, flags);
+	req = __nvme_alloc_rq(q, &cmd, qid == 0 ? NVME_QID_ANY : qid,
+			      flags, true);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4e0f61f27823..e7c954eae9c7 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -663,7 +663,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
 }
 
 /* initialize a passthrough request */
-void nvme_init_request(struct request *req, struct nvme_command *cmd)
+void nvme_init_request(struct request *req, struct nvme_command *cmd,
+		       bool retry)
 {
 	if (req->q->queuedata)
 		req->timeout = NVME_IO_TIMEOUT;
@@ -673,7 +674,8 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
 	/* passthru commands should let the driver set the SGL flags */
 	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
 
-	req->cmd_flags |= REQ_FAILFAST_DRIVER;
+	if (!retry)
+		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
 		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
@@ -1019,7 +1021,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
 
 struct request *__nvme_alloc_rq(struct request_queue *q,
 				struct nvme_command *cmd, int qid,
-				blk_mq_req_flags_t flags)
+				blk_mq_req_flags_t flags, bool retry)
 {
 	struct request *req;
 
@@ -1029,7 +1031,7 @@ struct request *__nvme_alloc_rq(struct request_queue *q,
 		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
 						qid - 1);
 	if (!IS_ERR(req))
-		nvme_init_request(req, cmd);
+		nvme_init_request(req, cmd, retry);
 
 	return req;
 }
@@ -1064,7 +1066,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 {
 	struct request *req;
 
-	req = __nvme_alloc_rq(q, cmd, NVME_QID_ANY, 0);
+	req = __nvme_alloc_rq(q, cmd, NVME_QID_ANY, 0, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -1211,7 +1213,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	}
 
 	rq = __nvme_alloc_rq(ctrl->admin_q, &ctrl->ka_cmd, NVME_QID_ANY,
-			     BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			     BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
 	if (IS_ERR(rq)) {
 		/* allocation failure, reset the controller */
 		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
@@ -1494,7 +1496,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	req = __nvme_alloc_rq(dev->admin_q, &c, NVME_QID_ANY, 0);
+	req = __nvme_alloc_rq(dev->admin_q, &c, NVME_QID_ANY, 0, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -2231,7 +2233,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
 	cmd.common.cdw11 = cpu_to_le32(len);
 
-	req = __nvme_alloc_rq(ctrl->admin_q, &cmd, NVME_QID_ANY, 0);
+	req = __nvme_alloc_rq(ctrl->admin_q, &cmd, NVME_QID_ANY, 0, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index f8e623279b75..d999364af43d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -153,7 +153,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY, 0);
+	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY, 0, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -203,7 +203,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.attrib = 1;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY, 0);
+	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY, 0, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -252,7 +252,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	cmd.prop_set.offset = cpu_to_le32(off);
 	cmd.prop_set.value = cpu_to_le64(val);
 
-	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY, 0);
+	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY, 0, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -413,7 +413,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
 	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -504,7 +504,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
 	req = __nvme_alloc_rq(ctrl->fabrics_q, &cmd, NVME_QID_ANY,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 723e7d5b778f..11f03a0696c2 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -154,7 +154,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
 	if (IS_ERR(req))
 		return req;
-	nvme_init_request(req, cmd);
+	nvme_init_request(req, cmd, false);
 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
 	return req;
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0ae5ef8b217f..624f1879ee20 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -774,7 +774,8 @@ static inline enum req_op nvme_req_op(struct nvme_command *cmd)
 }
 
 #define NVME_QID_ANY -1
-void nvme_init_request(struct request *req, struct nvme_command *cmd);
+void nvme_init_request(struct request *req, struct nvme_command *cmd,
+		       bool retry);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
@@ -814,7 +815,7 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 struct request *__nvme_alloc_rq(struct request_queue *q,
-		struct nvme_command *cmd, int qid, blk_mq_req_flags_t flags);
+		struct nvme_command *cmd, int qid, blk_mq_req_flags_t flags, bool retry);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct request *req, union nvme_result *result,
 		void *buffer, unsigned bufflen, int at_head);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8e3bae7d489b..f8ff52ad472e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1377,7 +1377,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 		 nvmeq->qid);
 
 	abort_req = __nvme_alloc_rq(dev->ctrl.admin_q, &cmd,
-			NVME_QID_ANY, BLK_MQ_REQ_NOWAIT);
+			NVME_QID_ANY, BLK_MQ_REQ_NOWAIT, false);
 	if (IS_ERR(abort_req)) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
@@ -2390,7 +2390,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-	req = __nvme_alloc_rq(q, &cmd, NVME_QID_ANY, BLK_MQ_REQ_NOWAIT);
+	req = __nvme_alloc_rq(q, &cmd, NVME_QID_ANY,
+			BLK_MQ_REQ_NOWAIT, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 0dae824a2d05..c00ab5ac54db 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -316,7 +316,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		timeout = nvmet_req_subsys(req)->admin_timeout;
 	}
 
-	rq = __nvme_alloc_rq(q, req->cmd, NVME_QID_ANY, 0);
+	rq = __nvme_alloc_rq(q, req->cmd, NVME_QID_ANY, 0, false);
 	if (IS_ERR(rq)) {
 		status = NVME_SC_INTERNAL;
 		goto out_put_ns;
-- 
2.35.3
